webdav: remove unused methods

Newnius 2021-09-20 09:43:00 -04:00
parent f3b2e54ef1
commit 9e965d6281
20 changed files with 78 additions and 11150 deletions

View File

@@ -6,27 +6,14 @@ package webdav
import (
"context"
"encoding/xml"
"io"
"net/http"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
)
// slashClean is equivalent to but slightly more efficient than
// path.Clean("/" + name).
func slashClean(name string) string {
if name == "" || name[0] != '/' {
name = "/" + name
}
return path.Clean(name)
}
// A FileSystem implements access to a collection of named files. The elements
// in a file path are separated by slash ('/', U+002F) characters, regardless
// of host operating system convention.
@@ -75,7 +62,7 @@ func (d Dir) resolve(name string) string {
if dir == "" {
dir = "."
}
return filepath.Join(dir, filepath.FromSlash(slashClean(name)))
return filepath.Join(dir, filepath.FromSlash(path.Clean(name)))
}
func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
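The removed slashClean helper always roots the name before cleaning, while path.Clean, now used in resolve, does not. A minimal sketch of the difference (illustrative only, not part of the commit, using just the standard library path package):

	path.Clean("../etc")        // "../etc"
	path.Clean("/" + "../etc")  // "/etc" (this is what slashClean("../etc") returned)
	path.Clean("foo/bar")       // "foo/bar"
	path.Clean("/" + "foo/bar") // "/foo/bar" (what slashClean("foo/bar") returned)

Whether the difference is observable here depends on whether every caller of resolve passes an already-rooted name; that is outside this diff.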
@@ -128,482 +115,6 @@ func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) {
return os.Stat(name)
}
// NewMemFS returns a new in-memory FileSystem implementation.
func NewMemFS() FileSystem {
return &memFS{
root: memFSNode{
children: make(map[string]*memFSNode),
mode: 0660 | os.ModeDir,
modTime: time.Now(),
},
}
}
// A memFS implements FileSystem, storing all metadata and actual file data
// in-memory. No limits on filesystem size are used, so it is not recommended
// this be used where the clients are untrusted.
//
// Concurrent access is permitted. The tree structure is protected by a mutex,
// and each node's contents and metadata are protected by a per-node mutex.
//
// TODO: Enforce file permissions.
type memFS struct {
mu sync.Mutex
root memFSNode
}
// TODO: clean up and rationalize the walk/find code.
// walk walks the directory tree for the fullname, calling f at each step. If f
// returns an error, the walk will be aborted and return that same error.
//
// dir is the directory at that step, frag is the name fragment, and final is
// whether it is the final step. For example, walking "/foo/bar/x" will result
// in 3 calls to f:
// - "/", "foo", false
// - "/foo/", "bar", false
// - "/foo/bar/", "x", true
// The frag argument will be empty only if dir is the root node and the walk
// ends at that root node.
func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error {
original := fullname
fullname = slashClean(fullname)
// Strip any leading "/"s to make fullname a relative path, as the walk
// starts at fs.root.
if fullname[0] == '/' {
fullname = fullname[1:]
}
dir := &fs.root
for {
frag, remaining := fullname, ""
i := strings.IndexRune(fullname, '/')
final := i < 0
if !final {
frag, remaining = fullname[:i], fullname[i+1:]
}
if frag == "" && dir != &fs.root {
panic("webdav: empty path fragment for a clean path")
}
if err := f(dir, frag, final); err != nil {
return &os.PathError{
Op: op,
Path: original,
Err: err,
}
}
if final {
break
}
child := dir.children[frag]
if child == nil {
return &os.PathError{
Op: op,
Path: original,
Err: os.ErrNotExist,
}
}
if !child.mode.IsDir() {
return &os.PathError{
Op: op,
Path: original,
Err: os.ErrInvalid,
}
}
dir, fullname = child, remaining
}
return nil
}
// find returns the parent of the named node and the relative name fragment
// from the parent to the child. For example, if finding "/foo/bar/baz" then
// parent will be the node for "/foo/bar" and frag will be "baz".
//
// If the fullname names the root node, then parent, frag and err will be zero.
//
// find returns an error if the parent does not already exist or the parent
// isn't a directory, but it will not return an error per se if the child does
// not already exist. The error returned is either nil or an *os.PathError
// whose Op is op.
func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) {
err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error {
if !final {
return nil
}
if frag0 != "" {
parent, frag = parent0, frag0
}
return nil
})
return parent, frag, err
}
func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("mkdir", name)
if err != nil {
return err
}
if dir == nil {
// We can't create the root.
return os.ErrInvalid
}
if _, ok := dir.children[frag]; ok {
return os.ErrExist
}
dir.children[frag] = &memFSNode{
children: make(map[string]*memFSNode),
mode: perm.Perm() | os.ModeDir,
modTime: time.Now(),
}
return nil
}
func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("open", name)
if err != nil {
return nil, err
}
var n *memFSNode
if dir == nil {
// We're opening the root.
if runtime.GOOS == "zos" {
if flag&os.O_WRONLY != 0 {
return nil, os.ErrPermission
}
} else {
if flag&(os.O_WRONLY|os.O_RDWR) != 0 {
return nil, os.ErrPermission
}
}
n, frag = &fs.root, "/"
} else {
n = dir.children[frag]
if flag&(os.O_SYNC|os.O_APPEND) != 0 {
// memFile doesn't support these flags yet.
return nil, os.ErrInvalid
}
if flag&os.O_CREATE != 0 {
if flag&os.O_EXCL != 0 && n != nil {
return nil, os.ErrExist
}
if n == nil {
n = &memFSNode{
mode: perm.Perm(),
}
dir.children[frag] = n
}
}
if n == nil {
return nil, os.ErrNotExist
}
if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 {
n.mu.Lock()
n.data = nil
n.mu.Unlock()
}
}
children := make([]os.FileInfo, 0, len(n.children))
for cName, c := range n.children {
children = append(children, c.stat(cName))
}
return &memFile{
n: n,
nameSnapshot: frag,
childrenSnapshot: children,
}, nil
}
func (fs *memFS) RemoveAll(ctx context.Context, name string) error {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("remove", name)
if err != nil {
return err
}
if dir == nil {
// We can't remove the root.
return os.ErrInvalid
}
delete(dir.children, frag)
return nil
}
func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error {
fs.mu.Lock()
defer fs.mu.Unlock()
oldName = slashClean(oldName)
newName = slashClean(newName)
if oldName == newName {
return nil
}
if strings.HasPrefix(newName, oldName+"/") {
// We can't rename oldName to be a sub-directory of itself.
return os.ErrInvalid
}
oDir, oFrag, err := fs.find("rename", oldName)
if err != nil {
return err
}
if oDir == nil {
// We can't rename from the root.
return os.ErrInvalid
}
nDir, nFrag, err := fs.find("rename", newName)
if err != nil {
return err
}
if nDir == nil {
// We can't rename to the root.
return os.ErrInvalid
}
oNode, ok := oDir.children[oFrag]
if !ok {
return os.ErrNotExist
}
if oNode.children != nil {
if nNode, ok := nDir.children[nFrag]; ok {
if nNode.children == nil {
return errNotADirectory
}
if len(nNode.children) != 0 {
return errDirectoryNotEmpty
}
}
}
delete(oDir.children, oFrag)
nDir.children[nFrag] = oNode
return nil
}
func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("stat", name)
if err != nil {
return nil, err
}
if dir == nil {
// We're stat'ting the root.
return fs.root.stat("/"), nil
}
if n, ok := dir.children[frag]; ok {
return n.stat(path.Base(name)), nil
}
return nil, os.ErrNotExist
}
// A memFSNode represents a single entry in the in-memory filesystem and also
// implements os.FileInfo.
type memFSNode struct {
// children is protected by memFS.mu.
children map[string]*memFSNode
mu sync.Mutex
data []byte
mode os.FileMode
modTime time.Time
deadProps map[xml.Name]Property
}
func (n *memFSNode) stat(name string) *memFileInfo {
n.mu.Lock()
defer n.mu.Unlock()
return &memFileInfo{
name: name,
size: int64(len(n.data)),
mode: n.mode,
modTime: n.modTime,
}
}
func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) {
n.mu.Lock()
defer n.mu.Unlock()
if len(n.deadProps) == 0 {
return nil, nil
}
ret := make(map[xml.Name]Property, len(n.deadProps))
for k, v := range n.deadProps {
ret[k] = v
}
return ret, nil
}
func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) {
n.mu.Lock()
defer n.mu.Unlock()
pstat := Propstat{Status: http.StatusOK}
for _, patch := range patches {
for _, p := range patch.Props {
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
if patch.Remove {
delete(n.deadProps, p.XMLName)
continue
}
if n.deadProps == nil {
n.deadProps = map[xml.Name]Property{}
}
n.deadProps[p.XMLName] = p
}
}
return []Propstat{pstat}, nil
}
type memFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (f *memFileInfo) Name() string { return f.name }
func (f *memFileInfo) Size() int64 { return f.size }
func (f *memFileInfo) Mode() os.FileMode { return f.mode }
func (f *memFileInfo) ModTime() time.Time { return f.modTime }
func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() }
func (f *memFileInfo) Sys() interface{} { return nil }
// A memFile is a File implementation for a memFSNode. It is a per-file (not
// per-node) read/write position, and a snapshot of the memFS' tree structure
// (a node's name and children) for that node.
type memFile struct {
n *memFSNode
nameSnapshot string
childrenSnapshot []os.FileInfo
// pos is protected by n.mu.
pos int
}
// A *memFile implements the optional DeadPropsHolder interface.
var _ DeadPropsHolder = (*memFile)(nil)
func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() }
func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) }
func (f *memFile) Close() error {
return nil
}
func (f *memFile) Read(p []byte) (int, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
if f.n.mode.IsDir() {
return 0, os.ErrInvalid
}
if f.pos >= len(f.n.data) {
return 0, io.EOF
}
n := copy(p, f.n.data[f.pos:])
f.pos += n
return n, nil
}
func (f *memFile) Readdir(count int) ([]os.FileInfo, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
if !f.n.mode.IsDir() {
return nil, os.ErrInvalid
}
old := f.pos
if old >= len(f.childrenSnapshot) {
// The os.File Readdir docs say that at the end of a directory,
// the error is io.EOF if count > 0 and nil if count <= 0.
if count > 0 {
return nil, io.EOF
}
return nil, nil
}
if count > 0 {
f.pos += count
if f.pos > len(f.childrenSnapshot) {
f.pos = len(f.childrenSnapshot)
}
} else {
f.pos = len(f.childrenSnapshot)
old = 0
}
return f.childrenSnapshot[old:f.pos], nil
}
func (f *memFile) Seek(offset int64, whence int) (int64, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
npos := f.pos
// TODO: How to handle offsets greater than the size of system int?
switch whence {
case os.SEEK_SET:
npos = int(offset)
case os.SEEK_CUR:
npos += int(offset)
case os.SEEK_END:
npos = len(f.n.data) + int(offset)
default:
npos = -1
}
if npos < 0 {
return 0, os.ErrInvalid
}
f.pos = npos
return int64(f.pos), nil
}
func (f *memFile) Stat() (os.FileInfo, error) {
return f.n.stat(f.nameSnapshot), nil
}
func (f *memFile) Write(p []byte) (int, error) {
lenp := len(p)
f.n.mu.Lock()
defer f.n.mu.Unlock()
if f.n.mode.IsDir() {
return 0, os.ErrInvalid
}
if f.pos < len(f.n.data) {
n := copy(f.n.data[f.pos:], p)
f.pos += n
p = p[n:]
} else if f.pos > len(f.n.data) {
// Write permits the creation of holes, if we've seek'ed past the
// existing end of file.
if f.pos <= cap(f.n.data) {
oldLen := len(f.n.data)
f.n.data = f.n.data[:f.pos]
hole := f.n.data[oldLen:]
for i := range hole {
hole[i] = 0
}
} else {
d := make([]byte, f.pos, f.pos+len(p))
copy(d, f.n.data)
f.n.data = d
}
}
if len(p) > 0 {
// We should only get here if f.pos == len(f.n.data).
f.n.data = append(f.n.data, p...)
f.pos = len(f.n.data)
}
f.n.modTime = time.Now()
return lenp, nil
}
// moveFiles moves files and/or directories from src to dst.
//
// See section 9.9.4 for when various HTTP status codes apply.
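For context on what the deleted memFS provided: upstream golang.org/x/net/webdav exposes the same in-memory filesystem as NewMemFS, typically plugged into a webdav.Handler. A minimal usage sketch against the upstream package (the port and handler wiring are illustrative assumptions, not part of this repository):

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/webdav"
)

func main() {
	h := &webdav.Handler{
		FileSystem: webdav.NewMemFS(), // the in-memory FileSystem removed from this fork
		LockSystem: webdav.NewMemLS(),
	}
	// Serve WebDAV on a hypothetical local port.
	log.Fatal(http.ListenAndServe(":8080", h))
}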

View File

@@ -526,327 +526,6 @@ func TestDir(t *testing.T) {
testFS(t, Dir(td))
}
func TestMemFS(t *testing.T) {
testFS(t, NewMemFS())
}
func TestMemFSRoot(t *testing.T) {
ctx := context.Background()
fs := NewMemFS()
for i := 0; i < 5; i++ {
stat, err := fs.Stat(ctx, "/")
if err != nil {
t.Fatalf("i=%d: Stat: %v", i, err)
}
if !stat.IsDir() {
t.Fatalf("i=%d: Stat.IsDir is false, want true", i)
}
f, err := fs.OpenFile(ctx, "/", os.O_RDONLY, 0)
if err != nil {
t.Fatalf("i=%d: OpenFile: %v", i, err)
}
defer f.Close()
children, err := f.Readdir(-1)
if err != nil {
t.Fatalf("i=%d: Readdir: %v", i, err)
}
if len(children) != i {
t.Fatalf("i=%d: got %d children, want %d", i, len(children), i)
}
if _, err := f.Write(make([]byte, 1)); err == nil {
t.Fatalf("i=%d: Write: got nil error, want non-nil", i)
}
if err := fs.Mkdir(ctx, fmt.Sprintf("/dir%d", i), 0777); err != nil {
t.Fatalf("i=%d: Mkdir: %v", i, err)
}
}
}
func TestMemFileReaddir(t *testing.T) {
ctx := context.Background()
fs := NewMemFS()
if err := fs.Mkdir(ctx, "/foo", 0777); err != nil {
t.Fatalf("Mkdir: %v", err)
}
readdir := func(count int) ([]os.FileInfo, error) {
f, err := fs.OpenFile(ctx, "/foo", os.O_RDONLY, 0)
if err != nil {
t.Fatalf("OpenFile: %v", err)
}
defer f.Close()
return f.Readdir(count)
}
if got, err := readdir(-1); len(got) != 0 || err != nil {
t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, <nil>", len(got), err)
}
if got, err := readdir(+1); len(got) != 0 || err != io.EOF {
t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err)
}
}
func TestMemFile(t *testing.T) {
testCases := []string{
"wantData ",
"wantSize 0",
"write abc",
"wantData abc",
"write de",
"wantData abcde",
"wantSize 5",
"write 5*x",
"write 4*y+2*z",
"write 3*st",
"wantData abcdexxxxxyyyyzzststst",
"wantSize 22",
"seek set 4 want 4",
"write EFG",
"wantData abcdEFGxxxyyyyzzststst",
"wantSize 22",
"seek set 2 want 2",
"read cdEF",
"read Gx",
"seek cur 0 want 8",
"seek cur 2 want 10",
"seek cur -1 want 9",
"write J",
"wantData abcdEFGxxJyyyyzzststst",
"wantSize 22",
"seek cur -4 want 6",
"write ghijk",
"wantData abcdEFghijkyyyzzststst",
"wantSize 22",
"read yyyz",
"seek cur 0 want 15",
"write ",
"seek cur 0 want 15",
"read ",
"seek cur 0 want 15",
"seek end -3 want 19",
"write ZZ",
"wantData abcdEFghijkyyyzzstsZZt",
"wantSize 22",
"write 4*A",
"wantData abcdEFghijkyyyzzstsZZAAAA",
"wantSize 25",
"seek end 0 want 25",
"seek end -5 want 20",
"read Z+4*A",
"write 5*B",
"wantData abcdEFghijkyyyzzstsZZAAAABBBBB",
"wantSize 30",
"seek end 10 want 40",
"write C",
"wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C",
"wantSize 41",
"write D",
"wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD",
"wantSize 42",
"seek set 43 want 43",
"write E",
"wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E",
"wantSize 44",
"seek set 0 want 0",
"write 5*123456789_",
"wantData 123456789_123456789_123456789_123456789_123456789_",
"wantSize 50",
"seek cur 0 want 50",
"seek cur -99 want err",
}
ctx := context.Background()
const filename = "/foo"
fs := NewMemFS()
f, err := fs.OpenFile(ctx, filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
t.Fatalf("OpenFile: %v", err)
}
defer f.Close()
for i, tc := range testCases {
j := strings.IndexByte(tc, ' ')
if j < 0 {
t.Fatalf("test case #%d %q: invalid command", i, tc)
}
op, arg := tc[:j], tc[j+1:]
// Expand an arg like "3*a+2*b" to "aaabb".
parts := strings.Split(arg, "+")
for j, part := range parts {
if k := strings.IndexByte(part, '*'); k >= 0 {
repeatCount, repeatStr := part[:k], part[k+1:]
n, err := strconv.Atoi(repeatCount)
if err != nil {
t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount)
}
parts[j] = strings.Repeat(repeatStr, n)
}
}
arg = strings.Join(parts, "")
switch op {
default:
t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)
case "read":
buf := make([]byte, len(arg))
if _, err := io.ReadFull(f, buf); err != nil {
t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err)
}
if got := string(buf); got != arg {
t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg)
}
case "seek":
parts := strings.Split(arg, " ")
if len(parts) != 4 {
t.Fatalf("test case #%d %q: invalid seek", i, tc)
}
whence := 0
switch parts[0] {
default:
t.Fatalf("test case #%d %q: invalid seek whence", i, tc)
case "set":
whence = os.SEEK_SET
case "cur":
whence = os.SEEK_CUR
case "end":
whence = os.SEEK_END
}
offset, err := strconv.Atoi(parts[1])
if err != nil {
t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1])
}
if parts[2] != "want" {
t.Fatalf("test case #%d %q: invalid seek", i, tc)
}
if parts[3] == "err" {
_, err := f.Seek(int64(offset), whence)
if err == nil {
t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc)
}
} else {
got, err := f.Seek(int64(offset), whence)
if err != nil {
t.Fatalf("test case #%d %q: Seek: %v", i, tc, err)
}
want, err := strconv.Atoi(parts[3])
if err != nil {
t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3])
}
if got != int64(want) {
t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want)
}
}
case "write":
n, err := f.Write([]byte(arg))
if err != nil {
t.Fatalf("test case #%d %q: write: %v", i, tc, err)
}
if n != len(arg) {
t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg))
}
case "wantData":
g, err := fs.OpenFile(ctx, filename, os.O_RDONLY, 0666)
if err != nil {
t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err)
}
gotBytes, err := ioutil.ReadAll(g)
if err != nil {
t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err)
}
for i, c := range gotBytes {
if c == '\x00' {
gotBytes[i] = '.'
}
}
got := string(gotBytes)
if got != arg {
t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg)
}
if err := g.Close(); err != nil {
t.Fatalf("test case #%d %q: Close: %v", i, tc, err)
}
case "wantSize":
n, err := strconv.Atoi(arg)
if err != nil {
t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg)
}
fi, err := fs.Stat(ctx, filename)
if err != nil {
t.Fatalf("test case #%d %q: Stat: %v", i, tc, err)
}
if got, want := fi.Size(), int64(n); got != want {
t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want)
}
}
}
}
// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a
// memFile doesn't allocate a new buffer for each of those N times. Otherwise,
// calling io.Copy(aMemFile, src) is likely to have quadratic complexity.
func TestMemFileWriteAllocs(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("gccgo allocates here")
}
ctx := context.Background()
fs := NewMemFS()
f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
t.Fatalf("OpenFile: %v", err)
}
defer f.Close()
xxx := make([]byte, 1024)
for i := range xxx {
xxx[i] = 'x'
}
a := testing.AllocsPerRun(100, func() {
f.Write(xxx)
})
// AllocsPerRun returns an integral value, so we compare the rounded-down
// number to zero.
if a > 0 {
t.Fatalf("%v allocs per run, want 0", a)
}
}
func BenchmarkMemFileWrite(b *testing.B) {
ctx := context.Background()
fs := NewMemFS()
xxx := make([]byte, 1024)
for i := range xxx {
xxx[i] = 'x'
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
b.Fatalf("OpenFile: %v", err)
}
for j := 0; j < 100; j++ {
f.Write(xxx)
}
if err := f.Close(); err != nil {
b.Fatalf("Close: %v", err)
}
if err := fs.RemoveAll(ctx, "/xxx"); err != nil {
b.Fatalf("RemoveAll: %v", err)
}
}
}
func TestCopyMoveProps(t *testing.T) {
ctx := context.Background()
fs := NewMemFS()

View File

@@ -1,173 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
// The If header is covered by Section 10.4.
// http://www.webdav.org/specs/rfc4918.html#HEADER_If
import (
"strings"
)
// ifHeader is a disjunction (OR) of ifLists.
type ifHeader struct {
lists []ifList
}
// ifList is a conjunction (AND) of Conditions, and an optional resource tag.
type ifList struct {
resourceTag string
conditions []Condition
}
// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string
// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is
// returned by req.Header.Get("If") for a http.Request req.
func parseIfHeader(httpHeader string) (h ifHeader, ok bool) {
s := strings.TrimSpace(httpHeader)
switch tokenType, _, _ := lex(s); tokenType {
case '(':
return parseNoTagLists(s)
case angleTokenType:
return parseTaggedLists(s)
default:
return ifHeader{}, false
}
}
func parseNoTagLists(s string) (h ifHeader, ok bool) {
for {
l, remaining, ok := parseList(s)
if !ok {
return ifHeader{}, false
}
h.lists = append(h.lists, l)
if remaining == "" {
return h, true
}
s = remaining
}
}
func parseTaggedLists(s string) (h ifHeader, ok bool) {
resourceTag, n := "", 0
for first := true; ; first = false {
tokenType, tokenStr, remaining := lex(s)
switch tokenType {
case angleTokenType:
if !first && n == 0 {
return ifHeader{}, false
}
resourceTag, n = tokenStr, 0
s = remaining
case '(':
n++
l, remaining, ok := parseList(s)
if !ok {
return ifHeader{}, false
}
l.resourceTag = resourceTag
h.lists = append(h.lists, l)
if remaining == "" {
return h, true
}
s = remaining
default:
return ifHeader{}, false
}
}
}
func parseList(s string) (l ifList, remaining string, ok bool) {
tokenType, _, s := lex(s)
if tokenType != '(' {
return ifList{}, "", false
}
for {
tokenType, _, remaining = lex(s)
if tokenType == ')' {
if len(l.conditions) == 0 {
return ifList{}, "", false
}
return l, remaining, true
}
c, remaining, ok := parseCondition(s)
if !ok {
return ifList{}, "", false
}
l.conditions = append(l.conditions, c)
s = remaining
}
}
func parseCondition(s string) (c Condition, remaining string, ok bool) {
tokenType, tokenStr, s := lex(s)
if tokenType == notTokenType {
c.Not = true
tokenType, tokenStr, s = lex(s)
}
switch tokenType {
case strTokenType, angleTokenType:
c.Token = tokenStr
case squareTokenType:
c.ETag = tokenStr
default:
return Condition{}, "", false
}
return c, s, true
}
// Single-rune tokens like '(' or ')' have a token type equal to their rune.
// All other tokens have a negative token type.
const (
errTokenType = rune(-1)
eofTokenType = rune(-2)
strTokenType = rune(-3)
notTokenType = rune(-4)
angleTokenType = rune(-5)
squareTokenType = rune(-6)
)
func lex(s string) (tokenType rune, tokenStr string, remaining string) {
// The net/textproto Reader that parses the HTTP header will collapse
// Linear White Space that spans multiple "\r\n" lines to a single " ",
// so we don't need to look for '\r' or '\n'.
for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') {
s = s[1:]
}
if len(s) == 0 {
return eofTokenType, "", ""
}
i := 0
loop:
for ; i < len(s); i++ {
switch s[i] {
case '\t', ' ', '(', ')', '<', '>', '[', ']':
break loop
}
}
if i != 0 {
tokenStr, remaining = s[:i], s[i:]
if tokenStr == "Not" {
return notTokenType, "", remaining
}
return strTokenType, tokenStr, remaining
}
j := 0
switch s[0] {
case '<':
j, tokenType = strings.IndexByte(s, '>'), angleTokenType
case '[':
j, tokenType = strings.IndexByte(s, ']'), squareTokenType
default:
return rune(s[0]), "", s[1:]
}
if j < 0 {
return errTokenType, "", ""
}
return tokenType, s[1:j], s[j+1:]
}

View File

@@ -1,322 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"reflect"
"strings"
"testing"
)
func TestParseIfHeader(t *testing.T) {
// The "section x.y.z" test cases come from section x.y.z of the spec at
// http://www.webdav.org/specs/rfc4918.html
testCases := []struct {
desc string
input string
want ifHeader
}{{
"bad: empty",
``,
ifHeader{},
}, {
"bad: no parens",
`foobar`,
ifHeader{},
}, {
"bad: empty list #1",
`()`,
ifHeader{},
}, {
"bad: empty list #2",
`(a) (b c) () (d)`,
ifHeader{},
}, {
"bad: no list after resource #1",
`<foo>`,
ifHeader{},
}, {
"bad: no list after resource #2",
`<foo> <bar> (a)`,
ifHeader{},
}, {
"bad: no list after resource #3",
`<foo> (a) (b) <bar>`,
ifHeader{},
}, {
"bad: no-tag-list followed by tagged-list",
`(a) (b) <foo> (c)`,
ifHeader{},
}, {
"bad: unfinished list",
`(a`,
ifHeader{},
}, {
"bad: unfinished ETag",
`([b`,
ifHeader{},
}, {
"bad: unfinished Notted list",
`(Not a`,
ifHeader{},
}, {
"bad: double Not",
`(Not Not a)`,
ifHeader{},
}, {
"good: one list with a Token",
`(a)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `a`,
}},
}},
},
}, {
"good: one list with an ETag",
`([a])`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
ETag: `a`,
}},
}},
},
}, {
"good: one list with three Nots",
`(Not a Not b Not [d])`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Not: true,
Token: `a`,
}, {
Not: true,
Token: `b`,
}, {
Not: true,
ETag: `d`,
}},
}},
},
}, {
"good: two lists",
`(a) (b)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `a`,
}},
}, {
conditions: []Condition{{
Token: `b`,
}},
}},
},
}, {
"good: two Notted lists",
`(Not a) (Not b)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Not: true,
Token: `a`,
}},
}, {
conditions: []Condition{{
Not: true,
Token: `b`,
}},
}},
},
}, {
"section 7.5.1",
`<http://www.example.com/users/f/fielding/index.html>
(<urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6>)`,
ifHeader{
lists: []ifList{{
resourceTag: `http://www.example.com/users/f/fielding/index.html`,
conditions: []Condition{{
Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`,
}},
}},
},
}, {
"section 7.5.2 #1",
`(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
}},
}},
},
}, {
"section 7.5.2 #2",
`<http://example.com/locked/>
(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
ifHeader{
lists: []ifList{{
resourceTag: `http://example.com/locked/`,
conditions: []Condition{{
Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
}},
}},
},
}, {
"section 7.5.2 #3",
`<http://example.com/locked/member>
(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
ifHeader{
lists: []ifList{{
resourceTag: `http://example.com/locked/member`,
conditions: []Condition{{
Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
}},
}},
},
}, {
"section 9.9.6",
`(<urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4>)
(<urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77>)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`,
}},
}, {
conditions: []Condition{{
Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`,
}},
}},
},
}, {
"section 9.10.8",
`(<urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4>)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`,
}},
}},
},
}, {
"section 10.4.6",
`(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
["I am an ETag"])
(["I am another ETag"])`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
}, {
ETag: `"I am an ETag"`,
}},
}, {
conditions: []Condition{{
ETag: `"I am another ETag"`,
}},
}},
},
}, {
"section 10.4.7",
`(Not <urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
<urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092>)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Not: true,
Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
}, {
Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`,
}},
}},
},
}, {
"section 10.4.8",
`(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)
(Not <DAV:no-lock>)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
}},
}, {
conditions: []Condition{{
Not: true,
Token: `DAV:no-lock`,
}},
}},
},
}, {
"section 10.4.9",
`</resource1>
(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
[W/"A weak ETag"]) (["strong ETag"])`,
ifHeader{
lists: []ifList{{
resourceTag: `/resource1`,
conditions: []Condition{{
Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
}, {
ETag: `W/"A weak ETag"`,
}},
}, {
resourceTag: `/resource1`,
conditions: []Condition{{
ETag: `"strong ETag"`,
}},
}},
},
}, {
"section 10.4.10",
`<http://www.example.com/specs/>
(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)`,
ifHeader{
lists: []ifList{{
resourceTag: `http://www.example.com/specs/`,
conditions: []Condition{{
Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
}},
}},
},
}, {
"section 10.4.11 #1",
`</specs/rfc2518.doc> (["4217"])`,
ifHeader{
lists: []ifList{{
resourceTag: `/specs/rfc2518.doc`,
conditions: []Condition{{
ETag: `"4217"`,
}},
}},
},
}, {
"section 10.4.11 #2",
`</specs/rfc2518.doc> (Not ["4217"])`,
ifHeader{
lists: []ifList{{
resourceTag: `/specs/rfc2518.doc`,
conditions: []Condition{{
Not: true,
ETag: `"4217"`,
}},
}},
},
}}
for _, tc := range testCases {
got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1))
if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok {
t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok)
continue
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want)
continue
}
}
}

View File

@@ -1,11 +0,0 @@
This is a fork of the encoding/xml package at ca1d6c4, the last commit before
https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name
space behavior" made late in the lead-up to the Go 1.5 release.
The list of encoding/xml changes is at
https://go.googlesource.com/go/+log/master/src/encoding/xml
This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is
released.
See http://golang.org/issue/11841

View File

@@ -1,56 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import "time"
var atomValue = &Feed{
XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
Title: "Example Feed",
Link: []Link{{Href: "http://example.org/"}},
Updated: ParseTime("2003-12-13T18:30:02Z"),
Author: Person{Name: "John Doe"},
Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6",
Entry: []Entry{
{
Title: "Atom-Powered Robots Run Amok",
Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}},
Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
Updated: ParseTime("2003-12-13T18:30:02Z"),
Summary: NewText("Some text."),
},
},
}
var atomXml = `` +
`<feed xmlns="http://www.w3.org/2005/Atom" updated="2003-12-13T18:30:02Z">` +
`<title>Example Feed</title>` +
`<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>` +
`<link href="http://example.org/"></link>` +
`<author><name>John Doe</name><uri></uri><email></email></author>` +
`<entry>` +
`<title>Atom-Powered Robots Run Amok</title>` +
`<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>` +
`<link href="http://example.org/2003/12/13/atom03"></link>` +
`<updated>2003-12-13T18:30:02Z</updated>` +
`<author><name></name><uri></uri><email></email></author>` +
`<summary>Some text.</summary>` +
`</entry>` +
`</feed>`
func ParseTime(str string) time.Time {
t, err := time.Parse(time.RFC3339, str)
if err != nil {
panic(err)
}
return t
}
func NewText(text string) Text {
return Text{
Body: text,
}
}

View File

@@ -1,151 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml_test
import (
"encoding/xml"
"fmt"
"os"
)
func ExampleMarshalIndent() {
type Address struct {
City, State string
}
type Person struct {
XMLName xml.Name `xml:"person"`
Id int `xml:"id,attr"`
FirstName string `xml:"name>first"`
LastName string `xml:"name>last"`
Age int `xml:"age"`
Height float32 `xml:"height,omitempty"`
Married bool
Address
Comment string `xml:",comment"`
}
v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
v.Comment = " Need more details. "
v.Address = Address{"Hanga Roa", "Easter Island"}
output, err := xml.MarshalIndent(v, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
os.Stdout.Write(output)
// Output:
// <person id="13">
// <name>
// <first>John</first>
// <last>Doe</last>
// </name>
// <age>42</age>
// <Married>false</Married>
// <City>Hanga Roa</City>
// <State>Easter Island</State>
// <!-- Need more details. -->
// </person>
}
func ExampleEncoder() {
type Address struct {
City, State string
}
type Person struct {
XMLName xml.Name `xml:"person"`
Id int `xml:"id,attr"`
FirstName string `xml:"name>first"`
LastName string `xml:"name>last"`
Age int `xml:"age"`
Height float32 `xml:"height,omitempty"`
Married bool
Address
Comment string `xml:",comment"`
}
v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
v.Comment = " Need more details. "
v.Address = Address{"Hanga Roa", "Easter Island"}
enc := xml.NewEncoder(os.Stdout)
enc.Indent(" ", " ")
if err := enc.Encode(v); err != nil {
fmt.Printf("error: %v\n", err)
}
// Output:
// <person id="13">
// <name>
// <first>John</first>
// <last>Doe</last>
// </name>
// <age>42</age>
// <Married>false</Married>
// <City>Hanga Roa</City>
// <State>Easter Island</State>
// <!-- Need more details. -->
// </person>
}
// This example demonstrates unmarshaling an XML excerpt into a value with
// some preset fields. Note that the Phone field isn't modified and that
// the XML <Company> element is ignored. Also, the Groups field is assigned
// considering the element path provided in its tag.
func ExampleUnmarshal() {
type Email struct {
Where string `xml:"where,attr"`
Addr string
}
type Address struct {
City, State string
}
type Result struct {
XMLName xml.Name `xml:"Person"`
Name string `xml:"FullName"`
Phone string
Email []Email
Groups []string `xml:"Group>Value"`
Address
}
v := Result{Name: "none", Phone: "none"}
data := `
<Person>
<FullName>Grace R. Emlin</FullName>
<Company>Example Inc.</Company>
<Email where="home">
<Addr>gre@example.com</Addr>
</Email>
<Email where='work'>
<Addr>gre@work.com</Addr>
</Email>
<Group>
<Value>Friends</Value>
<Value>Squash</Value>
</Group>
<City>Hanga Roa</City>
<State>Easter Island</State>
</Person>
`
err := xml.Unmarshal([]byte(data), &v)
if err != nil {
fmt.Printf("error: %v", err)
return
}
fmt.Printf("XMLName: %#v\n", v.XMLName)
fmt.Printf("Name: %q\n", v.Name)
fmt.Printf("Phone: %q\n", v.Phone)
fmt.Printf("Email: %v\n", v.Email)
fmt.Printf("Groups: %v\n", v.Groups)
fmt.Printf("Address: %v\n", v.Address)
// Output:
// XMLName: xml.Name{Space:"", Local:"Person"}
// Name: "Grace R. Emlin"
// Phone: "none"
// Email: [{home gre@example.com} {work gre@work.com}]
// Groups: [Friends Squash]
// Address: {Hanga Roa Easter Island}
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@@ -1,692 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"bytes"
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)
// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
// an XML element is an order-dependent collection of anonymous
// values, while a data structure is an order-independent collection
// of named values.
// See package json for a textual representation more suitable
// to data structures.
// Unmarshal parses the XML-encoded data and stores the result in
// the value pointed to by v, which must be an arbitrary struct,
// slice, or string. Well-formed data that does not fit into v is
// discarded.
//
// Because Unmarshal uses the reflect package, it can only assign
// to exported (upper case) fields. Unmarshal uses a case-sensitive
// comparison to match XML element names to tag values and struct
// field names.
//
// Unmarshal maps an XML element to a struct using the following rules.
// In the rules, the tag of a field refers to the value associated with the
// key 'xml' in the struct field's tag (see the example above).
//
// * If the struct has a field of type []byte or string with tag
// ",innerxml", Unmarshal accumulates the raw XML nested inside the
// element in that field. The rest of the rules still apply.
//
// * If the struct has a field named XMLName of type xml.Name,
// Unmarshal records the element name in that field.
//
// * If the XMLName field has an associated tag of the form
// "name" or "namespace-URL name", the XML element must have
// the given name (and, optionally, name space) or else Unmarshal
// returns an error.
//
// * If the XML element has an attribute whose name matches a
// struct field name with an associated tag containing ",attr" or
// the explicit name in a struct field tag of the form "name,attr",
// Unmarshal records the attribute value in that field.
//
// * If the XML element contains character data, that data is
// accumulated in the first struct field that has tag ",chardata".
// The struct field may have type []byte or string.
// If there is no such field, the character data is discarded.
//
// * If the XML element contains comments, they are accumulated in
// the first struct field that has tag ",comment". The struct
// field may have type []byte or string. If there is no such
// field, the comments are discarded.
//
// * If the XML element contains a sub-element whose name matches
// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
// will descend into the XML structure looking for elements with the
// given names, and will map the innermost elements to that struct
// field. A tag starting with ">" is equivalent to one starting
// with the field name followed by ">".
//
// * If the XML element contains a sub-element whose name matches
// a struct field's XMLName tag and the struct field has no
// explicit name tag as per the previous rule, unmarshal maps
// the sub-element to that struct field.
//
// * If the XML element contains a sub-element whose name matches a
// field without any mode flags (",attr", ",chardata", etc), Unmarshal
// maps the sub-element to that struct field.
//
// * If the XML element contains a sub-element that hasn't matched any
// of the above rules and the struct has a field with tag ",any",
// unmarshal maps the sub-element to that struct field.
//
// * An anonymous struct field is handled as if the fields of its
// value were part of the outer struct.
//
// * A struct field with tag "-" is never unmarshalled into.
//
// Unmarshal maps an XML element to a string or []byte by saving the
// concatenation of that element's character data in the string or
// []byte. The saved []byte is never nil.
//
// Unmarshal maps an attribute value to a string or []byte by saving
// the value in the string or slice.
//
// Unmarshal maps an XML element to a slice by extending the length of
// the slice and mapping the element to the newly created value.
//
// Unmarshal maps an XML element or attribute value to a bool by
// setting it to the boolean value represented by the string.
//
// Unmarshal maps an XML element or attribute value to an integer or
// floating-point field by setting the field to the result of
// interpreting the string value in decimal. There is no check for
// overflow.
//
// Unmarshal maps an XML element to an xml.Name by recording the
// element name.
//
// Unmarshal maps an XML element to a pointer by setting the pointer
// to a freshly allocated value and then mapping the element to that value.
//
func Unmarshal(data []byte, v interface{}) error {
return NewDecoder(bytes.NewReader(data)).Decode(v)
}
// Decode works like xml.Unmarshal, except it reads the decoder
// stream to find the start element.
func (d *Decoder) Decode(v interface{}) error {
return d.DecodeElement(v, nil)
}
// DecodeElement works like xml.Unmarshal except that it takes
// a pointer to the start XML element to decode into v.
// It is useful when a client reads some raw XML tokens itself
// but also wants to defer to Unmarshal for some elements.
func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
val := reflect.ValueOf(v)
if val.Kind() != reflect.Ptr {
return errors.New("non-pointer passed to Unmarshal")
}
return d.unmarshal(val.Elem(), start)
}
// An UnmarshalError represents an error in the unmarshalling process.
type UnmarshalError string
func (e UnmarshalError) Error() string { return string(e) }
// Unmarshaler is the interface implemented by objects that can unmarshal
// an XML element description of themselves.
//
// UnmarshalXML decodes a single XML element
// beginning with the given start element.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXML must consume exactly one XML element.
// One common implementation strategy is to unmarshal into
// a separate value with a layout matching the expected XML
// using d.DecodeElement, and then to copy the data from
// that value into the receiver.
// Another common strategy is to use d.Token to process the
// XML object one token at a time.
// UnmarshalXML may not use d.RawToken.
type Unmarshaler interface {
UnmarshalXML(d *Decoder, start StartElement) error
}
// UnmarshalerAttr is the interface implemented by objects that can unmarshal
// an XML attribute description of themselves.
//
// UnmarshalXMLAttr decodes a single XML attribute.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXMLAttr is used only for struct fields with the
// "attr" option in the field tag.
type UnmarshalerAttr interface {
UnmarshalXMLAttr(attr Attr) error
}
// receiverType returns the receiver type to use in an expression like "%s.MethodName".
func receiverType(val interface{}) string {
t := reflect.TypeOf(val)
if t.Name() != "" {
return t.String()
}
return "(" + t.String() + ")"
}
// unmarshalInterface unmarshals a single XML element into val.
// start is the opening tag of the element.
func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {
// Record that decoder must stop at end tag corresponding to start.
p.pushEOF()
p.unmarshalDepth++
err := val.UnmarshalXML(p, *start)
p.unmarshalDepth--
if err != nil {
p.popEOF()
return err
}
if !p.popEOF() {
return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local)
}
return nil
}
// unmarshalTextInterface unmarshals a single XML element into val.
// The chardata contained in the element (but not its children)
// is passed to the text unmarshaler.
func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {
var buf []byte
depth := 1
for depth > 0 {
t, err := p.Token()
if err != nil {
return err
}
switch t := t.(type) {
case CharData:
if depth == 1 {
buf = append(buf, t...)
}
case StartElement:
depth++
case EndElement:
depth--
}
}
return val.UnmarshalText(buf)
}
// unmarshalAttr unmarshals a single XML attribute into val.
func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {
return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
}
}
// Not an UnmarshalerAttr; try encoding.TextUnmarshaler.
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
}
}
copyValue(val, []byte(attr.Value))
return nil
}
var (
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)
// Unmarshal a single XML element into val.
func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
// Find start element if we need it.
if start == nil {
for {
tok, err := p.Token()
if err != nil {
return err
}
if t, ok := tok.(StartElement); ok {
start = &t
break
}
}
}
// Load value from interface, but only if the result will be
// usefully addressable.
if val.Kind() == reflect.Interface && !val.IsNil() {
e := val.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() {
val = e
}
}
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
if val.CanInterface() && val.Type().Implements(unmarshalerType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return p.unmarshalInterface(val.Interface().(Unmarshaler), start)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(unmarshalerType) {
return p.unmarshalInterface(pv.Interface().(Unmarshaler), start)
}
}
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)
}
}
var (
data []byte
saveData reflect.Value
comment []byte
saveComment reflect.Value
saveXML reflect.Value
saveXMLIndex int
saveXMLData []byte
saveAny reflect.Value
sv reflect.Value
tinfo *typeInfo
err error
)
switch v := val; v.Kind() {
default:
return errors.New("unknown type " + v.Type().String())
case reflect.Interface:
// TODO: For now, simply ignore the field. In the near
// future we may choose to unmarshal the start
// element on it, if not nil.
return p.Skip()
case reflect.Slice:
typ := v.Type()
if typ.Elem().Kind() == reflect.Uint8 {
// []byte
saveData = v
break
}
// Slice of element values.
// Grow slice.
n := v.Len()
if n >= v.Cap() {
ncap := 2 * n
if ncap < 4 {
ncap = 4
}
new := reflect.MakeSlice(typ, n, ncap)
reflect.Copy(new, v)
v.Set(new)
}
v.SetLen(n + 1)
// Recur to read element into slice.
if err := p.unmarshal(v.Index(n), start); err != nil {
v.SetLen(n)
return err
}
return nil
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
saveData = v
case reflect.Struct:
typ := v.Type()
if typ == nameType {
v.Set(reflect.ValueOf(start.Name))
break
}
sv = v
tinfo, err = getTypeInfo(typ)
if err != nil {
return err
}
// Validate and assign element name.
if tinfo.xmlname != nil {
finfo := tinfo.xmlname
if finfo.name != "" && finfo.name != start.Name.Local {
return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">")
}
if finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have "
if start.Name.Space == "" {
e += "no name space"
} else {
e += start.Name.Space
}
return UnmarshalError(e)
}
fv := finfo.value(sv)
if _, ok := fv.Interface().(Name); ok {
fv.Set(reflect.ValueOf(start.Name))
}
}
// Assign attributes.
// Also, determine whether we need to save character data or comments.
for i := range tinfo.fields {
finfo := &tinfo.fields[i]
switch finfo.flags & fMode {
case fAttr:
strv := finfo.value(sv)
// Look for attribute.
for _, a := range start.Attr {
if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) {
if err := p.unmarshalAttr(strv, a); err != nil {
return err
}
break
}
}
case fCharData:
if !saveData.IsValid() {
saveData = finfo.value(sv)
}
case fComment:
if !saveComment.IsValid() {
saveComment = finfo.value(sv)
}
case fAny, fAny | fElement:
if !saveAny.IsValid() {
saveAny = finfo.value(sv)
}
case fInnerXml:
if !saveXML.IsValid() {
saveXML = finfo.value(sv)
if p.saved == nil {
saveXMLIndex = 0
p.saved = new(bytes.Buffer)
} else {
saveXMLIndex = p.savedOffset()
}
}
}
}
}
// Find end element.
// Process sub-elements along the way.
Loop:
for {
var savedOffset int
if saveXML.IsValid() {
savedOffset = p.savedOffset()
}
tok, err := p.Token()
if err != nil {
return err
}
switch t := tok.(type) {
case StartElement:
consumed := false
if sv.IsValid() {
consumed, err = p.unmarshalPath(tinfo, sv, nil, &t)
if err != nil {
return err
}
if !consumed && saveAny.IsValid() {
consumed = true
if err := p.unmarshal(saveAny, &t); err != nil {
return err
}
}
}
if !consumed {
if err := p.Skip(); err != nil {
return err
}
}
case EndElement:
if saveXML.IsValid() {
saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
if saveXMLIndex == 0 {
p.saved = nil
}
}
break Loop
case CharData:
if saveData.IsValid() {
data = append(data, t...)
}
case Comment:
if saveComment.IsValid() {
comment = append(comment, t...)
}
}
}
if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {
if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
return err
}
saveData = reflect.Value{}
}
if saveData.IsValid() && saveData.CanAddr() {
pv := saveData.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
return err
}
saveData = reflect.Value{}
}
}
if err := copyValue(saveData, data); err != nil {
return err
}
switch t := saveComment; t.Kind() {
case reflect.String:
t.SetString(string(comment))
case reflect.Slice:
t.Set(reflect.ValueOf(comment))
}
switch t := saveXML; t.Kind() {
case reflect.String:
t.SetString(string(saveXMLData))
case reflect.Slice:
t.Set(reflect.ValueOf(saveXMLData))
}
return nil
}
func copyValue(dst reflect.Value, src []byte) (err error) {
dst0 := dst
if dst.Kind() == reflect.Ptr {
if dst.IsNil() {
dst.Set(reflect.New(dst.Type().Elem()))
}
dst = dst.Elem()
}
// Save accumulated data.
switch dst.Kind() {
case reflect.Invalid:
// Probably a comment.
default:
return errors.New("cannot unmarshal into " + dst0.Type().String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetInt(itmp)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetUint(utmp)
case reflect.Float32, reflect.Float64:
ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())
if err != nil {
return err
}
dst.SetFloat(ftmp)
case reflect.Bool:
value, err := strconv.ParseBool(strings.TrimSpace(string(src)))
if err != nil {
return err
}
dst.SetBool(value)
case reflect.String:
dst.SetString(string(src))
case reflect.Slice:
if len(src) == 0 {
// non-nil to flag presence
src = []byte{}
}
dst.SetBytes(src)
}
return nil
}
// unmarshalPath walks down an XML structure looking for wanted
// paths, and calls unmarshal on them.
// The consumed result tells whether XML elements have been consumed
// from the Decoder until start's matching end element, or if it's
// still untouched because start is uninteresting for sv's fields.
func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
recurse := false
Loop:
for i := range tinfo.fields {
finfo := &tinfo.fields[i]
if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
continue
}
for j := range parents {
if parents[j] != finfo.parents[j] {
continue Loop
}
}
if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
// It's a perfect match, unmarshal the field.
return true, p.unmarshal(finfo.value(sv), start)
}
if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
// It's a prefix for the field. Break and recurse
// since it's not ok for one field path to be itself
// the prefix for another field path.
recurse = true
// We can reuse the same slice as long as we
// don't try to append to it.
parents = finfo.parents[:len(parents)+1]
break
}
}
if !recurse {
// We have no business with this element.
return false, nil
}
// The element is not a perfect match for any field, but one
// or more fields have the path to this element as a parent
// prefix. Recurse and attempt to match these.
for {
var tok Token
tok, err = p.Token()
if err != nil {
return true, err
}
switch t := tok.(type) {
case StartElement:
consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)
if err != nil {
return true, err
}
if !consumed2 {
if err := p.Skip(); err != nil {
return true, err
}
}
case EndElement:
return true, nil
}
}
}
// Skip reads tokens until it has consumed the end element
// matching the most recent start element already consumed.
// It recurs if it encounters a start element, so it can be used to
// skip nested structures.
// It returns nil if it finds an end element matching the start
// element; otherwise it returns an error describing the problem.
func (d *Decoder) Skip() error {
for {
tok, err := d.Token()
if err != nil {
return err
}
switch tok.(type) {
case StartElement:
if err := d.Skip(); err != nil {
return err
}
case EndElement:
return nil
}
}
}
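The Skip behavior documented above matches the standard encoding/xml Decoder that this fork tracks. A minimal sketch using the standard library (the sample XML is made up for illustration):

package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"log"
	"strings"
)

func main() {
	dec := xml.NewDecoder(strings.NewReader(`<a><b><c/>ignored</b><d>kept</d></a>`))
	for {
		tok, err := dec.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		switch t := tok.(type) {
		case xml.StartElement:
			if t.Name.Local == "b" {
				// Skip consumes tokens up to and including the matching </b>,
				// recursing through nested elements such as <c/>.
				if err := dec.Skip(); err != nil {
					log.Fatal(err)
				}
			}
		case xml.CharData:
			fmt.Printf("chardata: %q\n", string(t)) // prints only "kept"
		}
	}
}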

View File

@@ -1,744 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"bytes"
"fmt"
"io"
"reflect"
"strings"
"testing"
"time"
)
// Stripped down Atom feed data structures.
func TestUnmarshalFeed(t *testing.T) {
var f Feed
if err := Unmarshal([]byte(atomFeedString), &f); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if !reflect.DeepEqual(f, atomFeed) {
t.Fatalf("have %#v\nwant %#v", f, atomFeed)
}
}
// hget http://codereview.appspot.com/rss/mine/rsc
const atomFeedString = `
<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us" updated="2009-10-04T01:35:58+00:00"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><link href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub
</title><link href="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
An attempt at adding pubsubhubbub support to Rietveld.
http://code.google.com/p/pubsubhubbub
http://code.google.com/p/rietveld/issues/detail?id=155
The server side of the protocol is trivial:
1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all
feeds that will be pubsubhubbubbed.
2. every time one of those feeds changes, tell the hub
with a simple POST request.
I have tested this by adding debug prints to a local hub
server and checking that the server got the right publish
requests.
I can&amp;#39;t quite get the server to work, but I think the bug
is not in my code. I think that the server expects to be
able to grab the feed and see the feed&amp;#39;s actual URL in
the link rel=&amp;quot;self&amp;quot;, but the default value for that drops
the :port from the URL, and I cannot for the life of me
figure out how to get the Atom generator deep inside
django not to do that, or even where it is doing that,
or even what code is running to generate the Atom feed.
(I thought I knew but I added some assert False statements
and it kept running!)
Ignoring that particular problem, I would appreciate
feedback on the right way to get the two values at
the top of feeds.py marked NOTE(rsc).
</summary></entry><entry><title>rietveld: correct tab handling
</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
This fixes the buggy tab rendering that can be seen at
http://codereview.appspot.com/116075/diff/1/2
The fundamental problem was that the tab code was
not being told what column the text began in, so it
didn&amp;#39;t know where to put the tab stops. Another problem
was that some of the code assumed that string byte
offsets were the same as column offsets, which is only
true if there are no tabs.
In the process of fixing this, I cleaned up the arguments
to Fold and ExpandTabs and renamed them Break and
_ExpandTabs so that I could be sure that I found all the
call sites. I also wanted to verify that ExpandTabs was
not being used from outside intra_region_diff.py.
</summary></entry></feed> `
type Feed struct {
XMLName Name `xml:"http://www.w3.org/2005/Atom feed"`
Title string `xml:"title"`
Id string `xml:"id"`
Link []Link `xml:"link"`
Updated time.Time `xml:"updated,attr"`
Author Person `xml:"author"`
Entry []Entry `xml:"entry"`
}
type Entry struct {
Title string `xml:"title"`
Id string `xml:"id"`
Link []Link `xml:"link"`
Updated time.Time `xml:"updated"`
Author Person `xml:"author"`
Summary Text `xml:"summary"`
}
type Link struct {
Rel string `xml:"rel,attr,omitempty"`
Href string `xml:"href,attr"`
}
type Person struct {
Name string `xml:"name"`
URI string `xml:"uri"`
Email string `xml:"email"`
InnerXML string `xml:",innerxml"`
}
type Text struct {
Type string `xml:"type,attr,omitempty"`
Body string `xml:",chardata"`
}
var atomFeed = Feed{
XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
Title: "Code Review - My issues",
Link: []Link{
{Rel: "alternate", Href: "http://codereview.appspot.com/"},
{Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"},
},
Id: "http://codereview.appspot.com/",
Updated: ParseTime("2009-10-04T01:35:58+00:00"),
Author: Person{
Name: "rietveld<>",
InnerXML: "<name>rietveld&lt;&gt;</name>",
},
Entry: []Entry{
{
Title: "rietveld: an attempt at pubsubhubbub\n",
Link: []Link{
{Rel: "alternate", Href: "http://codereview.appspot.com/126085"},
},
Updated: ParseTime("2009-10-04T01:35:58+00:00"),
Author: Person{
Name: "email-address-removed",
InnerXML: "<name>email-address-removed</name>",
},
Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a",
Summary: Text{
Type: "html",
Body: `
An attempt at adding pubsubhubbub support to Rietveld.
http://code.google.com/p/pubsubhubbub
http://code.google.com/p/rietveld/issues/detail?id=155
The server side of the protocol is trivial:
1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all
feeds that will be pubsubhubbubbed.
2. every time one of those feeds changes, tell the hub
with a simple POST request.
I have tested this by adding debug prints to a local hub
server and checking that the server got the right publish
requests.
I can&#39;t quite get the server to work, but I think the bug
is not in my code. I think that the server expects to be
able to grab the feed and see the feed&#39;s actual URL in
the link rel=&quot;self&quot;, but the default value for that drops
the :port from the URL, and I cannot for the life of me
figure out how to get the Atom generator deep inside
django not to do that, or even where it is doing that,
or even what code is running to generate the Atom feed.
(I thought I knew but I added some assert False statements
and it kept running!)
Ignoring that particular problem, I would appreciate
feedback on the right way to get the two values at
the top of feeds.py marked NOTE(rsc).
`,
},
},
{
Title: "rietveld: correct tab handling\n",
Link: []Link{
{Rel: "alternate", Href: "http://codereview.appspot.com/124106"},
},
Updated: ParseTime("2009-10-03T23:02:17+00:00"),
Author: Person{
Name: "email-address-removed",
InnerXML: "<name>email-address-removed</name>",
},
Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a",
Summary: Text{
Type: "html",
Body: `
This fixes the buggy tab rendering that can be seen at
http://codereview.appspot.com/116075/diff/1/2
The fundamental problem was that the tab code was
not being told what column the text began in, so it
didn&#39;t know where to put the tab stops. Another problem
was that some of the code assumed that string byte
offsets were the same as column offsets, which is only
true if there are no tabs.
In the process of fixing this, I cleaned up the arguments
to Fold and ExpandTabs and renamed them Break and
_ExpandTabs so that I could be sure that I found all the
call sites. I also wanted to verify that ExpandTabs was
not being used from outside intra_region_diff.py.
`,
},
},
},
}
const pathTestString = `
<Result>
<Before>1</Before>
<Items>
<Item1>
<Value>A</Value>
</Item1>
<Item2>
<Value>B</Value>
</Item2>
<Item1>
<Value>C</Value>
<Value>D</Value>
</Item1>
<_>
<Value>E</Value>
</_>
</Items>
<After>2</After>
</Result>
`
type PathTestItem struct {
Value string
}
type PathTestA struct {
Items []PathTestItem `xml:">Item1"`
Before, After string
}
type PathTestB struct {
Other []PathTestItem `xml:"Items>Item1"`
Before, After string
}
type PathTestC struct {
Values1 []string `xml:"Items>Item1>Value"`
Values2 []string `xml:"Items>Item2>Value"`
Before, After string
}
type PathTestSet struct {
Item1 []PathTestItem
}
type PathTestD struct {
Other PathTestSet `xml:"Items"`
Before, After string
}
type PathTestE struct {
Underline string `xml:"Items>_>Value"`
Before, After string
}
var pathTests = []interface{}{
&PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
&PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
&PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"},
&PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"},
&PathTestE{Underline: "E", Before: "1", After: "2"},
}
func TestUnmarshalPaths(t *testing.T) {
for _, pt := range pathTests {
v := reflect.New(reflect.TypeOf(pt).Elem()).Interface()
if err := Unmarshal([]byte(pathTestString), v); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if !reflect.DeepEqual(v, pt) {
t.Fatalf("have %#v\nwant %#v", v, pt)
}
}
}
type BadPathTestA struct {
First string `xml:"items>item1"`
Other string `xml:"items>item2"`
Second string `xml:"items"`
}
type BadPathTestB struct {
Other string `xml:"items>item2>value"`
First string `xml:"items>item1"`
Second string `xml:"items>item1>value"`
}
type BadPathTestC struct {
First string
Second string `xml:"First"`
}
type BadPathTestD struct {
BadPathEmbeddedA
BadPathEmbeddedB
}
type BadPathEmbeddedA struct {
First string
}
type BadPathEmbeddedB struct {
Second string `xml:"First"`
}
var badPathTests = []struct {
v, e interface{}
}{
{&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}},
{&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}},
{&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}},
{&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}},
}
func TestUnmarshalBadPaths(t *testing.T) {
for _, tt := range badPathTests {
err := Unmarshal([]byte(pathTestString), tt.v)
if !reflect.DeepEqual(err, tt.e) {
t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e)
}
}
}
const OK = "OK"
const withoutNameTypeData = `
<?xml version="1.0" charset="utf-8"?>
<Test3 Attr="OK" />`
type TestThree struct {
XMLName Name `xml:"Test3"`
Attr string `xml:",attr"`
}
func TestUnmarshalWithoutNameType(t *testing.T) {
var x TestThree
if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if x.Attr != OK {
t.Fatalf("have %v\nwant %v", x.Attr, OK)
}
}
func TestUnmarshalAttr(t *testing.T) {
type ParamVal struct {
Int int `xml:"int,attr"`
}
type ParamPtr struct {
Int *int `xml:"int,attr"`
}
type ParamStringPtr struct {
Int *string `xml:"int,attr"`
}
x := []byte(`<Param int="1" />`)
p1 := &ParamPtr{}
if err := Unmarshal(x, p1); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if p1.Int == nil {
t.Fatalf("Unmarshal failed in to *int field")
} else if *p1.Int != 1 {
t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1)
}
p2 := &ParamVal{}
if err := Unmarshal(x, p2); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if p2.Int != 1 {
t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1)
}
p3 := &ParamStringPtr{}
if err := Unmarshal(x, p3); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if p3.Int == nil {
t.Fatalf("Unmarshal failed in to *string field")
} else if *p3.Int != "1" {
t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1)
}
}
type Tables struct {
HTable string `xml:"http://www.w3.org/TR/html4/ table"`
FTable string `xml:"http://www.w3schools.com/furniture table"`
}
var tables = []struct {
xml string
tab Tables
ns string
}{
{
xml: `<Tables>` +
`<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
`<table xmlns="http://www.w3schools.com/furniture">world</table>` +
`</Tables>`,
tab: Tables{"hello", "world"},
},
{
xml: `<Tables>` +
`<table xmlns="http://www.w3schools.com/furniture">world</table>` +
`<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
`</Tables>`,
tab: Tables{"hello", "world"},
},
{
xml: `<Tables xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/">` +
`<f:table>world</f:table>` +
`<h:table>hello</h:table>` +
`</Tables>`,
tab: Tables{"hello", "world"},
},
{
xml: `<Tables>` +
`<table>bogus</table>` +
`</Tables>`,
tab: Tables{},
},
{
xml: `<Tables>` +
`<table>only</table>` +
`</Tables>`,
tab: Tables{HTable: "only"},
ns: "http://www.w3.org/TR/html4/",
},
{
xml: `<Tables>` +
`<table>only</table>` +
`</Tables>`,
tab: Tables{FTable: "only"},
ns: "http://www.w3schools.com/furniture",
},
{
xml: `<Tables>` +
`<table>only</table>` +
`</Tables>`,
tab: Tables{},
ns: "something else entirely",
},
}
func TestUnmarshalNS(t *testing.T) {
for i, tt := range tables {
var dst Tables
var err error
if tt.ns != "" {
d := NewDecoder(strings.NewReader(tt.xml))
d.DefaultSpace = tt.ns
err = d.Decode(&dst)
} else {
err = Unmarshal([]byte(tt.xml), &dst)
}
if err != nil {
t.Errorf("#%d: Unmarshal: %v", i, err)
continue
}
want := tt.tab
if dst != want {
t.Errorf("#%d: dst=%+v, want %+v", i, dst, want)
}
}
}
func TestRoundTrip(t *testing.T) {
// From issue 7535
const s = `<ex:element xmlns:ex="http://example.com/schema"></ex:element>`
in := bytes.NewBufferString(s)
for i := 0; i < 10; i++ {
out := &bytes.Buffer{}
d := NewDecoder(in)
e := NewEncoder(out)
for {
t, err := d.Token()
if err == io.EOF {
break
}
if err != nil {
fmt.Println("failed:", err)
return
}
e.EncodeToken(t)
}
e.Flush()
in = out
}
if got := in.String(); got != s {
t.Errorf("have: %q\nwant: %q\n", got, s)
}
}
func TestMarshalNS(t *testing.T) {
dst := Tables{"hello", "world"}
data, err := Marshal(&dst)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
want := `<Tables><table xmlns="http://www.w3.org/TR/html4/">hello</table><table xmlns="http://www.w3schools.com/furniture">world</table></Tables>`
str := string(data)
if str != want {
t.Errorf("have: %q\nwant: %q\n", str, want)
}
}
type TableAttrs struct {
TAttr TAttr
}
type TAttr struct {
HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"`
FTable string `xml:"http://www.w3schools.com/furniture table,attr"`
Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"`
Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"`
Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"`
Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"`
Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"`
}
var tableAttrs = []struct {
xml string
tab TableAttrs
ns string
}{
{
xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
`h:table="hello" f:table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
},
{
xml: `<TableAttrs><TAttr xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` +
`h:table="hello" f:table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
},
{
xml: `<TableAttrs><TAttr ` +
`h:table="hello" f:table="world" xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
},
{
// Default space does not apply to attribute names.
xml: `<TableAttrs xmlns="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
`h:table="hello" table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}},
},
{
// Default space does not apply to attribute names.
xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr xmlns="http://www.w3.org/TR/html4/" ` +
`table="hello" f:table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "", FTable: "world"}},
},
{
xml: `<TableAttrs><TAttr ` +
`table="bogus" ` +
`/></TableAttrs>`,
tab: TableAttrs{},
},
{
// Default space does not apply to attribute names.
xml: `<TableAttrs xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
`h:table="hello" table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}},
ns: "http://www.w3schools.com/furniture",
},
{
// Default space does not apply to attribute names.
xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr ` +
`table="hello" f:table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "", FTable: "world"}},
ns: "http://www.w3.org/TR/html4/",
},
{
xml: `<TableAttrs><TAttr ` +
`table="bogus" ` +
`/></TableAttrs>`,
tab: TableAttrs{},
ns: "something else entirely",
},
}
func TestUnmarshalNSAttr(t *testing.T) {
for i, tt := range tableAttrs {
var dst TableAttrs
var err error
if tt.ns != "" {
d := NewDecoder(strings.NewReader(tt.xml))
d.DefaultSpace = tt.ns
err = d.Decode(&dst)
} else {
err = Unmarshal([]byte(tt.xml), &dst)
}
if err != nil {
t.Errorf("#%d: Unmarshal: %v", i, err)
continue
}
want := tt.tab
if dst != want {
t.Errorf("#%d: dst=%+v, want %+v", i, dst, want)
}
}
}
func TestMarshalNSAttr(t *testing.T) {
src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}}
data, err := Marshal(&src)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
want := `<TableAttrs><TAttr xmlns:json_1="http://golang.org/2/json/" xmlns:json="http://golang.org/json/" xmlns:_xmlfoo="http://golang.org/xmlfoo/" xmlns:_xml="http://golang.org/xml/" xmlns:furniture="http://www.w3schools.com/furniture" xmlns:html4="http://www.w3.org/TR/html4/" html4:table="hello" furniture:table="world" xml:lang="en_US" _xml:other="other1" _xmlfoo:other="other2" json:other="other3" json_1:other="other4"></TAttr></TableAttrs>`
str := string(data)
if str != want {
t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want)
}
var dst TableAttrs
if err := Unmarshal(data, &dst); err != nil {
t.Errorf("Unmarshal: %v", err)
}
if dst != src {
t.Errorf("Unmarshal = %q, want %q", dst, src)
}
}
type MyCharData struct {
body string
}
func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error {
for {
t, err := d.Token()
if err == io.EOF { // found end of element
break
}
if err != nil {
return err
}
if char, ok := t.(CharData); ok {
m.body += string(char)
}
}
return nil
}
var _ Unmarshaler = (*MyCharData)(nil)
func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error {
panic("must not call")
}
type MyAttr struct {
attr string
}
func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error {
m.attr = attr.Value
return nil
}
var _ UnmarshalerAttr = (*MyAttr)(nil)
type MyStruct struct {
Data *MyCharData
Attr *MyAttr `xml:",attr"`
Data2 MyCharData
Attr2 MyAttr `xml:",attr"`
}
func TestUnmarshaler(t *testing.T) {
xml := `<?xml version="1.0" encoding="utf-8"?>
<MyStruct Attr="attr1" Attr2="attr2">
<Data>hello <!-- comment -->world</Data>
<Data2>howdy <!-- comment -->world</Data2>
</MyStruct>
`
var m MyStruct
if err := Unmarshal([]byte(xml), &m); err != nil {
t.Fatal(err)
}
if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" {
t.Errorf("m=%#+v\n", m)
}
}
type Pea struct {
Cotelydon string
}
type Pod struct {
Pea interface{} `xml:"Pea"`
}
// https://golang.org/issue/6836
func TestUnmarshalIntoInterface(t *testing.T) {
pod := new(Pod)
pod.Pea = new(Pea)
xml := `<Pod><Pea><Cotelydon>Green stuff</Cotelydon></Pea></Pod>`
err := Unmarshal([]byte(xml), pod)
if err != nil {
t.Fatalf("failed to unmarshal %q: %v", xml, err)
}
pea, ok := pod.Pea.(*Pea)
if !ok {
t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea)
}
have, want := pea.Cotelydon, "Green stuff"
if have != want {
t.Errorf("failed to unmarshal into interface, have %q want %q", have, want)
}
}

View File

@@ -1,371 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"fmt"
"reflect"
"strings"
"sync"
)
// typeInfo holds details for the xml representation of a type.
type typeInfo struct {
xmlname *fieldInfo
fields []fieldInfo
}
// fieldInfo holds details for the xml representation of a single field.
type fieldInfo struct {
idx []int
name string
xmlns string
flags fieldFlags
parents []string
}
type fieldFlags int
const (
fElement fieldFlags = 1 << iota
fAttr
fCharData
fInnerXml
fComment
fAny
fOmitEmpty
fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny
)
var tinfoMap = make(map[reflect.Type]*typeInfo)
var tinfoLock sync.RWMutex
var nameType = reflect.TypeOf(Name{})
// getTypeInfo returns the typeInfo structure with details necessary
// for marshalling and unmarshalling typ.
func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
tinfoLock.RLock()
tinfo, ok := tinfoMap[typ]
tinfoLock.RUnlock()
if ok {
return tinfo, nil
}
tinfo = &typeInfo{}
if typ.Kind() == reflect.Struct && typ != nameType {
n := typ.NumField()
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.PkgPath != "" || f.Tag.Get("xml") == "-" {
continue // Private field
}
// For embedded structs, embed its fields.
if f.Anonymous {
t := f.Type
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() == reflect.Struct {
inner, err := getTypeInfo(t)
if err != nil {
return nil, err
}
if tinfo.xmlname == nil {
tinfo.xmlname = inner.xmlname
}
for _, finfo := range inner.fields {
finfo.idx = append([]int{i}, finfo.idx...)
if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
return nil, err
}
}
continue
}
}
finfo, err := structFieldInfo(typ, &f)
if err != nil {
return nil, err
}
if f.Name == "XMLName" {
tinfo.xmlname = finfo
continue
}
// Add the field if it doesn't conflict with other fields.
if err := addFieldInfo(typ, tinfo, finfo); err != nil {
return nil, err
}
}
}
tinfoLock.Lock()
tinfoMap[typ] = tinfo
tinfoLock.Unlock()
return tinfo, nil
}
// structFieldInfo builds and returns a fieldInfo for f.
func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
finfo := &fieldInfo{idx: f.Index}
// Split the tag from the xml namespace if necessary.
tag := f.Tag.Get("xml")
if i := strings.Index(tag, " "); i >= 0 {
finfo.xmlns, tag = tag[:i], tag[i+1:]
}
// Parse flags.
tokens := strings.Split(tag, ",")
if len(tokens) == 1 {
finfo.flags = fElement
} else {
tag = tokens[0]
for _, flag := range tokens[1:] {
switch flag {
case "attr":
finfo.flags |= fAttr
case "chardata":
finfo.flags |= fCharData
case "innerxml":
finfo.flags |= fInnerXml
case "comment":
finfo.flags |= fComment
case "any":
finfo.flags |= fAny
case "omitempty":
finfo.flags |= fOmitEmpty
}
}
// Validate the flags used.
valid := true
switch mode := finfo.flags & fMode; mode {
case 0:
finfo.flags |= fElement
case fAttr, fCharData, fInnerXml, fComment, fAny:
if f.Name == "XMLName" || tag != "" && mode != fAttr {
valid = false
}
default:
// This will also catch multiple modes in a single field.
valid = false
}
if finfo.flags&fMode == fAny {
finfo.flags |= fElement
}
if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
valid = false
}
if !valid {
return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
f.Name, typ, f.Tag.Get("xml"))
}
}
// Use of xmlns without a name is not allowed.
if finfo.xmlns != "" && tag == "" {
return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
f.Name, typ, f.Tag.Get("xml"))
}
if f.Name == "XMLName" {
// The XMLName field records the XML element name. Don't
// process it as usual because its name should default to
// empty rather than to the field name.
finfo.name = tag
return finfo, nil
}
if tag == "" {
// If the name part of the tag is completely empty, get
// default from XMLName of underlying struct if feasible,
// or field name otherwise.
if xmlname := lookupXMLName(f.Type); xmlname != nil {
finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
} else {
finfo.name = f.Name
}
return finfo, nil
}
if finfo.xmlns == "" && finfo.flags&fAttr == 0 {
// If it's an element no namespace specified, get the default
// from the XMLName of enclosing struct if possible.
if xmlname := lookupXMLName(typ); xmlname != nil {
finfo.xmlns = xmlname.xmlns
}
}
// Prepare field name and parents.
parents := strings.Split(tag, ">")
if parents[0] == "" {
parents[0] = f.Name
}
if parents[len(parents)-1] == "" {
return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
}
finfo.name = parents[len(parents)-1]
if len(parents) > 1 {
if (finfo.flags & fElement) == 0 {
return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
}
finfo.parents = parents[:len(parents)-1]
}
// If the field type has an XMLName field, the names must match
// so that the behavior of both marshalling and unmarshalling
// is straightforward and unambiguous.
if finfo.flags&fElement != 0 {
ftyp := f.Type
xmlname := lookupXMLName(ftyp)
if xmlname != nil && xmlname.name != finfo.name {
return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
finfo.name, typ, f.Name, xmlname.name, ftyp)
}
}
return finfo, nil
}
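// An illustrative example (assumed, not part of the original file) of the tag
// forms parsed above and the flags they set:
//
//	type Example struct {
//		XMLName Name   `xml:"urn:example example"` // element name plus xmlns
//		ID      string `xml:"id,attr"`             // fAttr
//		Body    string `xml:",chardata"`           // fCharData
//		Raw     string `xml:",innerxml"`           // fInnerXml
//		Hidden  string `xml:"-"`                   // skipped entirely by getTypeInfo
//		Note    string `xml:"note,omitempty"`      // fElement | fOmitEmpty
//	}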
// lookupXMLName returns the fieldInfo for typ's XMLName field
// in case it exists and has a valid xml field tag, otherwise
// it returns nil.
func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {
for typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
if typ.Kind() != reflect.Struct {
return nil
}
for i, n := 0, typ.NumField(); i < n; i++ {
f := typ.Field(i)
if f.Name != "XMLName" {
continue
}
finfo, err := structFieldInfo(typ, &f)
if finfo.name != "" && err == nil {
return finfo
}
// Also consider errors as a non-existent field tag
// and let getTypeInfo itself report the error.
break
}
return nil
}
func min(a, b int) int {
if a <= b {
return a
}
return b
}
// addFieldInfo adds finfo to tinfo.fields if there are no
// conflicts, or if conflicts arise from previous fields that were
// obtained from deeper embedded structures than finfo. In the latter
// case, the conflicting entries are dropped.
// A conflict occurs when the path (parent + name) to a field is
// itself a prefix of another path, or when two paths match exactly.
// It is okay for field paths to share a common, shorter prefix.
func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
var conflicts []int
Loop:
// First, figure all conflicts. Most working code will have none.
for i := range tinfo.fields {
oldf := &tinfo.fields[i]
if oldf.flags&fMode != newf.flags&fMode {
continue
}
if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
continue
}
minl := min(len(newf.parents), len(oldf.parents))
for p := 0; p < minl; p++ {
if oldf.parents[p] != newf.parents[p] {
continue Loop
}
}
if len(oldf.parents) > len(newf.parents) {
if oldf.parents[len(newf.parents)] == newf.name {
conflicts = append(conflicts, i)
}
} else if len(oldf.parents) < len(newf.parents) {
if newf.parents[len(oldf.parents)] == oldf.name {
conflicts = append(conflicts, i)
}
} else {
if newf.name == oldf.name {
conflicts = append(conflicts, i)
}
}
}
// Without conflicts, add the new field and return.
if conflicts == nil {
tinfo.fields = append(tinfo.fields, *newf)
return nil
}
// If any conflict is shallower, ignore the new field.
// This matches the Go field resolution on embedding.
for _, i := range conflicts {
if len(tinfo.fields[i].idx) < len(newf.idx) {
return nil
}
}
// Otherwise, if any of them is at the same depth level, it's an error.
for _, i := range conflicts {
oldf := &tinfo.fields[i]
if len(oldf.idx) == len(newf.idx) {
f1 := typ.FieldByIndex(oldf.idx)
f2 := typ.FieldByIndex(newf.idx)
return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
}
}
// Otherwise, the new field is shallower, and thus takes precedence,
// so drop the conflicting fields from tinfo and append the new one.
for c := len(conflicts) - 1; c >= 0; c-- {
i := conflicts[c]
copy(tinfo.fields[i:], tinfo.fields[i+1:])
tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
}
tinfo.fields = append(tinfo.fields, *newf)
return nil
}
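// An illustrative note (assumed, not part of the original file): under the
// rules above, the tag paths "items>item1" and "items>item2" may coexist,
// since they only share a common prefix, whereas "items>item1" and "items"
// conflict at the same embedding depth, because the shorter path is itself a
// prefix of the longer one and addFieldInfo reports a TagPathError:
//
//	type OK struct {
//		A string `xml:"items>item1"`
//		B string `xml:"items>item2"`
//	}
//	type Conflicts struct {
//		A string `xml:"items>item1"`
//		B string `xml:"items"` // prefix of A's path: TagPathError
//	}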
// A TagPathError represents an error in the unmarshalling process
// caused by the use of field tags with conflicting paths.
type TagPathError struct {
Struct reflect.Type
Field1, Tag1 string
Field2, Tag2 string
}
func (e *TagPathError) Error() string {
return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
}
// value returns v's field value corresponding to finfo.
// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
// and dereferences pointers as necessary.
func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
for i, x := range finfo.idx {
if i > 0 {
t := v.Type()
if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
}
v = v.Field(x)
}
return v
}

File diff suppressed because it is too large

View File

@@ -1,752 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"bytes"
"fmt"
"io"
"reflect"
"strings"
"testing"
"unicode/utf8"
)
const testInput = `
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<body xmlns:foo="ns1" xmlns="ns2" xmlns:tag="ns3" ` +
"\r\n\t" + ` >
<hello lang="en">World &lt;&gt;&apos;&quot; &#x767d;&#40300;</hello>
<query>&; &is-it;</query>
<goodbye />
<outer foo:attr="value" xmlns:tag="ns4">
<inner/>
</outer>
<tag:name>
<![CDATA[Some text here.]]>
</tag:name>
</body><!-- missing final newline -->`
var testEntity = map[string]string{"何": "What", "is-it": "is it?"}
var rawTokens = []Token{
CharData("\n"),
ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
CharData("\n"),
Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
CharData("\n"),
StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
CharData("\n "),
StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
CharData("World <>'\" 白鵬翔"),
EndElement{Name{"", "hello"}},
CharData("\n "),
StartElement{Name{"", "query"}, []Attr{}},
CharData("What is it?"),
EndElement{Name{"", "query"}},
CharData("\n "),
StartElement{Name{"", "goodbye"}, []Attr{}},
EndElement{Name{"", "goodbye"}},
CharData("\n "),
StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
CharData("\n "),
StartElement{Name{"", "inner"}, []Attr{}},
EndElement{Name{"", "inner"}},
CharData("\n "),
EndElement{Name{"", "outer"}},
CharData("\n "),
StartElement{Name{"tag", "name"}, []Attr{}},
CharData("\n "),
CharData("Some text here."),
CharData("\n "),
EndElement{Name{"tag", "name"}},
CharData("\n"),
EndElement{Name{"", "body"}},
Comment(" missing final newline "),
}
var cookedTokens = []Token{
CharData("\n"),
ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
CharData("\n"),
Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
CharData("\n"),
StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
CharData("\n "),
StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
CharData("World <>'\" 白鵬翔"),
EndElement{Name{"ns2", "hello"}},
CharData("\n "),
StartElement{Name{"ns2", "query"}, []Attr{}},
CharData("What is it?"),
EndElement{Name{"ns2", "query"}},
CharData("\n "),
StartElement{Name{"ns2", "goodbye"}, []Attr{}},
EndElement{Name{"ns2", "goodbye"}},
CharData("\n "),
StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
CharData("\n "),
StartElement{Name{"ns2", "inner"}, []Attr{}},
EndElement{Name{"ns2", "inner"}},
CharData("\n "),
EndElement{Name{"ns2", "outer"}},
CharData("\n "),
StartElement{Name{"ns3", "name"}, []Attr{}},
CharData("\n "),
CharData("Some text here."),
CharData("\n "),
EndElement{Name{"ns3", "name"}},
CharData("\n"),
EndElement{Name{"ns2", "body"}},
Comment(" missing final newline "),
}
const testInputAltEncoding = `
<?xml version="1.0" encoding="x-testing-uppercase"?>
<TAG>VALUE</TAG>`
var rawTokensAltEncoding = []Token{
CharData("\n"),
ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("value"),
EndElement{Name{"", "tag"}},
}
var xmlInput = []string{
// unexpected EOF cases
"<",
"<t",
"<t ",
"<t/",
"<!",
"<!-",
"<!--",
"<!--c-",
"<!--c--",
"<!d",
"<t></",
"<t></t",
"<?",
"<?p",
"<t a",
"<t a=",
"<t a='",
"<t a=''",
"<t/><![",
"<t/><![C",
"<t/><![CDATA[d",
"<t/><![CDATA[d]",
"<t/><![CDATA[d]]",
// other Syntax errors
"<>",
"<t/a",
"<0 />",
"<?0 >",
// "<!0 >", // let the Token() caller handle
"</0>",
"<t 0=''>",
"<t a='&'>",
"<t a='<'>",
"<t>&nbspc;</t>",
"<t a>",
"<t a=>",
"<t a=v>",
// "<![CDATA[d]]>", // let the Token() caller handle
"<t></e>",
"<t></>",
"<t></t!",
"<t>cdata]]></t>",
}
func TestRawToken(t *testing.T) {
d := NewDecoder(strings.NewReader(testInput))
d.Entity = testEntity
testRawToken(t, d, testInput, rawTokens)
}
const nonStrictInput = `
<tag>non&entity</tag>
<tag>&unknown;entity</tag>
<tag>&#123</tag>
<tag>&#zzz;</tag>
<tag>&なまえ3;</tag>
<tag>&lt-gt;</tag>
<tag>&;</tag>
<tag>&0a;</tag>
`
var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"}
var nonStrictTokens = []Token{
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("non&entity"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&unknown;entity"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&#123"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&#zzz;"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&なまえ3;"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&lt-gt;"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&;"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&0a;"),
EndElement{Name{"", "tag"}},
CharData("\n"),
}
func TestNonStrictRawToken(t *testing.T) {
d := NewDecoder(strings.NewReader(nonStrictInput))
d.Strict = false
testRawToken(t, d, nonStrictInput, nonStrictTokens)
}
type downCaser struct {
t *testing.T
r io.ByteReader
}
func (d *downCaser) ReadByte() (c byte, err error) {
c, err = d.r.ReadByte()
if c >= 'A' && c <= 'Z' {
c += 'a' - 'A'
}
return
}
func (d *downCaser) Read(p []byte) (int, error) {
d.t.Fatalf("unexpected Read call on downCaser reader")
panic("unreachable")
}
func TestRawTokenAltEncoding(t *testing.T) {
d := NewDecoder(strings.NewReader(testInputAltEncoding))
d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
if charset != "x-testing-uppercase" {
t.Fatalf("unexpected charset %q", charset)
}
return &downCaser{t, input.(io.ByteReader)}, nil
}
testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding)
}
func TestRawTokenAltEncodingNoConverter(t *testing.T) {
d := NewDecoder(strings.NewReader(testInputAltEncoding))
token, err := d.RawToken()
if token == nil {
t.Fatalf("expected a token on first RawToken call")
}
if err != nil {
t.Fatal(err)
}
token, err = d.RawToken()
if token != nil {
t.Errorf("expected a nil token; got %#v", token)
}
if err == nil {
t.Fatalf("expected an error on second RawToken call")
}
const encoding = "x-testing-uppercase"
if !strings.Contains(err.Error(), encoding) {
t.Errorf("expected error to contain %q; got error: %v",
encoding, err)
}
}
func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) {
lastEnd := int64(0)
for i, want := range rawTokens {
start := d.InputOffset()
have, err := d.RawToken()
end := d.InputOffset()
if err != nil {
t.Fatalf("token %d: unexpected error: %s", i, err)
}
if !reflect.DeepEqual(have, want) {
var shave, swant string
if _, ok := have.(CharData); ok {
shave = fmt.Sprintf("CharData(%q)", have)
} else {
shave = fmt.Sprintf("%#v", have)
}
if _, ok := want.(CharData); ok {
swant = fmt.Sprintf("CharData(%q)", want)
} else {
swant = fmt.Sprintf("%#v", want)
}
t.Errorf("token %d = %s, want %s", i, shave, swant)
}
// Check that InputOffset returned actual token.
switch {
case start < lastEnd:
t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have)
case start >= end:
// Special case: EndElement can be synthesized.
if start == end && end == lastEnd {
break
}
t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have)
case end > int64(len(raw)):
t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have)
default:
text := raw[start:end]
if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) {
t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have)
}
}
lastEnd = end
}
}
// Ensure that directives (specifically !DOCTYPE) include the complete
// text of any nested directives, noting that < and > do not change
// nesting depth if they are in single or double quotes.
var nestedDirectivesInput = `
<!DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
<!DOCTYPE [<!ENTITY xlt ">">]>
<!DOCTYPE [<!ENTITY xlt "<">]>
<!DOCTYPE [<!ENTITY xlt '>'>]>
<!DOCTYPE [<!ENTITY xlt '<'>]>
<!DOCTYPE [<!ENTITY xlt '">'>]>
<!DOCTYPE [<!ENTITY xlt "'<">]>
`
var nestedDirectivesTokens = []Token{
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt ">">]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt "<">]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt '>'>]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt '<'>]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt '">'>]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt "'<">]`),
CharData("\n"),
}
func TestNestedDirectives(t *testing.T) {
d := NewDecoder(strings.NewReader(nestedDirectivesInput))
for i, want := range nestedDirectivesTokens {
have, err := d.Token()
if err != nil {
t.Fatalf("token %d: unexpected error: %s", i, err)
}
if !reflect.DeepEqual(have, want) {
t.Errorf("token %d = %#v want %#v", i, have, want)
}
}
}
func TestToken(t *testing.T) {
d := NewDecoder(strings.NewReader(testInput))
d.Entity = testEntity
for i, want := range cookedTokens {
have, err := d.Token()
if err != nil {
t.Fatalf("token %d: unexpected error: %s", i, err)
}
if !reflect.DeepEqual(have, want) {
t.Errorf("token %d = %#v want %#v", i, have, want)
}
}
}
func TestSyntax(t *testing.T) {
for i := range xmlInput {
d := NewDecoder(strings.NewReader(xmlInput[i]))
var err error
for _, err = d.Token(); err == nil; _, err = d.Token() {
}
if _, ok := err.(*SyntaxError); !ok {
t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i])
}
}
}
type allScalars struct {
True1 bool
True2 bool
False1 bool
False2 bool
Int int
Int8 int8
Int16 int16
Int32 int32
Int64 int64
Uint int
Uint8 uint8
Uint16 uint16
Uint32 uint32
Uint64 uint64
Uintptr uintptr
Float32 float32
Float64 float64
String string
PtrString *string
}
var all = allScalars{
True1: true,
True2: true,
False1: false,
False2: false,
Int: 1,
Int8: -2,
Int16: 3,
Int32: -4,
Int64: 5,
Uint: 6,
Uint8: 7,
Uint16: 8,
Uint32: 9,
Uint64: 10,
Uintptr: 11,
Float32: 13.0,
Float64: 14.0,
String: "15",
PtrString: &sixteen,
}
var sixteen = "16"
const testScalarsInput = `<allscalars>
<True1>true</True1>
<True2>1</True2>
<False1>false</False1>
<False2>0</False2>
<Int>1</Int>
<Int8>-2</Int8>
<Int16>3</Int16>
<Int32>-4</Int32>
<Int64>5</Int64>
<Uint>6</Uint>
<Uint8>7</Uint8>
<Uint16>8</Uint16>
<Uint32>9</Uint32>
<Uint64>10</Uint64>
<Uintptr>11</Uintptr>
<Float>12.0</Float>
<Float32>13.0</Float32>
<Float64>14.0</Float64>
<String>15</String>
<PtrString>16</PtrString>
</allscalars>`
func TestAllScalars(t *testing.T) {
var a allScalars
err := Unmarshal([]byte(testScalarsInput), &a)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(a, all) {
t.Errorf("have %+v want %+v", a, all)
}
}
type item struct {
Field_a string
}
func TestIssue569(t *testing.T) {
data := `<item><Field_a>abcd</Field_a></item>`
var i item
err := Unmarshal([]byte(data), &i)
if err != nil || i.Field_a != "abcd" {
t.Fatal("Expecting abcd")
}
}
func TestUnquotedAttrs(t *testing.T) {
data := "<tag attr=azAZ09:-_\t>"
d := NewDecoder(strings.NewReader(data))
d.Strict = false
token, err := d.Token()
if _, ok := err.(*SyntaxError); ok {
t.Errorf("Unexpected error: %v", err)
}
if token.(StartElement).Name.Local != "tag" {
t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
}
attr := token.(StartElement).Attr[0]
if attr.Value != "azAZ09:-_" {
t.Errorf("Unexpected attribute value: %v", attr.Value)
}
if attr.Name.Local != "attr" {
t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
}
}
func TestValuelessAttrs(t *testing.T) {
tests := [][3]string{
{"<p nowrap>", "p", "nowrap"},
{"<p nowrap >", "p", "nowrap"},
{"<input checked/>", "input", "checked"},
{"<input checked />", "input", "checked"},
}
for _, test := range tests {
d := NewDecoder(strings.NewReader(test[0]))
d.Strict = false
token, err := d.Token()
if _, ok := err.(*SyntaxError); ok {
t.Errorf("Unexpected error: %v", err)
}
if token.(StartElement).Name.Local != test[1] {
t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
}
attr := token.(StartElement).Attr[0]
if attr.Value != test[2] {
t.Errorf("Unexpected attribute value: %v", attr.Value)
}
if attr.Name.Local != test[2] {
t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
}
}
}
func TestCopyTokenCharData(t *testing.T) {
data := []byte("same data")
var tok1 Token = CharData(data)
tok2 := CopyToken(tok1)
if !reflect.DeepEqual(tok1, tok2) {
t.Error("CopyToken(CharData) != CharData")
}
data[1] = 'o'
if reflect.DeepEqual(tok1, tok2) {
t.Error("CopyToken(CharData) uses same buffer.")
}
}
func TestCopyTokenStartElement(t *testing.T) {
elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}
var tok1 Token = elt
tok2 := CopyToken(tok1)
if tok1.(StartElement).Attr[0].Value != "en" {
t.Error("CopyToken overwrote Attr[0]")
}
if !reflect.DeepEqual(tok1, tok2) {
t.Error("CopyToken(StartElement) != StartElement")
}
tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"}
if reflect.DeepEqual(tok1, tok2) {
t.Error("CopyToken(CharData) uses same buffer.")
}
}
func TestSyntaxErrorLineNum(t *testing.T) {
testInput := "<P>Foo<P>\n\n<P>Bar</>\n"
d := NewDecoder(strings.NewReader(testInput))
var err error
for _, err = d.Token(); err == nil; _, err = d.Token() {
}
synerr, ok := err.(*SyntaxError)
if !ok {
t.Error("Expected SyntaxError.")
}
if synerr.Line != 3 {
t.Error("SyntaxError didn't have correct line number.")
}
}
func TestTrailingRawToken(t *testing.T) {
input := `<FOO></FOO> `
d := NewDecoder(strings.NewReader(input))
var err error
for _, err = d.RawToken(); err == nil; _, err = d.RawToken() {
}
if err != io.EOF {
t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err)
}
}
func TestTrailingToken(t *testing.T) {
input := `<FOO></FOO> `
d := NewDecoder(strings.NewReader(input))
var err error
for _, err = d.Token(); err == nil; _, err = d.Token() {
}
if err != io.EOF {
t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
}
}
func TestEntityInsideCDATA(t *testing.T) {
input := `<test><![CDATA[ &val=foo ]]></test>`
d := NewDecoder(strings.NewReader(input))
var err error
for _, err = d.Token(); err == nil; _, err = d.Token() {
}
if err != io.EOF {
t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
}
}
var characterTests = []struct {
in string
err string
}{
{"\x12<doc/>", "illegal character code U+0012"},
{"<?xml version=\"1.0\"?>\x0b<doc/>", "illegal character code U+000B"},
{"\xef\xbf\xbe<doc/>", "illegal character code U+FFFE"},
{"<?xml version=\"1.0\"?><doc>\r\n<hiya/>\x07<toots/></doc>", "illegal character code U+0007"},
{"<?xml version=\"1.0\"?><doc \x12='value'>what's up</doc>", "expected attribute name in element"},
{"<doc>&abc\x01;</doc>", "invalid character entity &abc (no semicolon)"},
{"<doc>&\x01;</doc>", "invalid character entity & (no semicolon)"},
{"<doc>&\xef\xbf\xbe;</doc>", "invalid character entity &\uFFFE;"},
{"<doc>&hello;</doc>", "invalid character entity &hello;"},
}
func TestDisallowedCharacters(t *testing.T) {
for i, tt := range characterTests {
d := NewDecoder(strings.NewReader(tt.in))
var err error
for err == nil {
_, err = d.Token()
}
synerr, ok := err.(*SyntaxError)
if !ok {
t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err)
}
if synerr.Msg != tt.err {
t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg)
}
}
}
type procInstEncodingTest struct {
expect, got string
}
var procInstTests = []struct {
input string
expect [2]string
}{
{`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}},
{`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}},
{`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}},
{`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}},
{`encoding="FOO" `, [2]string{"", "FOO"}},
}
func TestProcInstEncoding(t *testing.T) {
for _, test := range procInstTests {
if got := procInst("version", test.input); got != test.expect[0] {
t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0])
}
if got := procInst("encoding", test.input); got != test.expect[1] {
t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1])
}
}
}
// Ensure that directives with comments include the complete
// text of any nested directives.
var directivesWithCommentsInput = `
<!DOCTYPE [<!-- a comment --><!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
<!DOCTYPE [<!ENTITY go "Golang"><!-- a comment-->]>
<!DOCTYPE <!-> <!> <!----> <!-->--> <!--->--> [<!ENTITY go "Golang"><!-- a comment-->]>
`
var directivesWithCommentsTokens = []Token{
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY go "Golang">]`),
CharData("\n"),
Directive(`DOCTYPE <!-> <!> [<!ENTITY go "Golang">]`),
CharData("\n"),
}
func TestDirectivesWithComments(t *testing.T) {
d := NewDecoder(strings.NewReader(directivesWithCommentsInput))
for i, want := range directivesWithCommentsTokens {
have, err := d.Token()
if err != nil {
t.Fatalf("token %d: unexpected error: %s", i, err)
}
if !reflect.DeepEqual(have, want) {
t.Errorf("token %d = %#v want %#v", i, have, want)
}
}
}
// Writer whose Write method always returns an error.
type errWriter struct{}
func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") }
func TestEscapeTextIOErrors(t *testing.T) {
expectErr := "unwritable"
err := EscapeText(errWriter{}, []byte{'A'})
if err == nil || err.Error() != expectErr {
t.Errorf("have %v, want %v", err, expectErr)
}
}
func TestEscapeTextInvalidChar(t *testing.T) {
input := []byte("A \x00 terminated string.")
expected := "A \uFFFD terminated string."
buff := new(bytes.Buffer)
if err := EscapeText(buff, input); err != nil {
t.Fatalf("have %v, want nil", err)
}
text := buff.String()
if text != expected {
t.Errorf("have %v, want %v", text, expected)
}
}
func TestIssue5880(t *testing.T) {
type T []byte
data, err := Marshal(T{192, 168, 0, 1})
if err != nil {
t.Errorf("Marshal error: %v", err)
}
if !utf8.Valid(data) {
t.Errorf("Marshal generated invalid UTF-8: %x", data)
}
}

View File

@@ -1,95 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ignore
// +build ignore
/*
This program is a server for the WebDAV 'litmus' compliance test at
http://www.webdav.org/neon/litmus/
To run the test:
go run litmus_test_server.go
and separately, from the downloaded litmus-xxx directory:
make URL=http://localhost:9999/ check
*/
package main
import (
"flag"
"fmt"
"log"
"net/http"
"net/url"
"golang.org/x/net/webdav"
)
var port = flag.Int("port", 9999, "server port")
func main() {
flag.Parse()
log.SetFlags(0)
h := &webdav.Handler{
FileSystem: webdav.NewMemFS(),
LockSystem: webdav.NewMemLS(),
Logger: func(r *http.Request, err error) {
litmus := r.Header.Get("X-Litmus")
if len(litmus) > 19 {
litmus = litmus[:16] + "..."
}
switch r.Method {
case "COPY", "MOVE":
dst := ""
if u, err := url.Parse(r.Header.Get("Destination")); err == nil {
dst = u.Path
}
o := r.Header.Get("Overwrite")
log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err)
default:
log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err)
}
},
}
// The next line would normally be:
// http.Handle("/", h)
// but we wrap that HTTP handler h to cater for a special case.
//
// The propfind_invalid2 litmus test case expects an empty namespace prefix
// declaration to be an error. The FAQ in the webdav litmus test says:
//
// "What does the "propfind_invalid2" test check for?...
//
// If a request was sent with an XML body which included an empty namespace
// prefix declaration (xmlns:ns1=""), then the server must reject that with
// a "400 Bad Request" response, as it is invalid according to the XML
// Namespace specification."
//
// On the other hand, the Go standard library's encoding/xml package
// accepts an empty xmlns namespace, as per the discussion at
// https://github.com/golang/go/issues/8068
//
// Empty namespaces seem disallowed in the second (2006) edition of the XML
// standard, but allowed in a later edition. The grammar differs between
// http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and
// http://www.w3.org/TR/REC-xml-names/#dt-prefix
//
// Thus, we assume that the propfind_invalid2 test is obsolete, and
// hard-code the 400 Bad Request response that the test expects.
http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" {
http.Error(w, "400 Bad Request", http.StatusBadRequest)
return
}
h.ServeHTTP(w, r)
}))
addr := fmt.Sprintf(":%d", *port)
log.Printf("Serving %v", addr)
log.Fatal(http.ListenAndServe(addr, nil))
}

View File

@@ -1,445 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"container/heap"
"errors"
"strconv"
"strings"
"sync"
"time"
)
var (
// ErrConfirmationFailed is returned by a LockSystem's Confirm method.
ErrConfirmationFailed = errors.New("webdav: confirmation failed")
// ErrForbidden is returned by a LockSystem's Unlock method.
ErrForbidden = errors.New("webdav: forbidden")
// ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods.
ErrLocked = errors.New("webdav: locked")
// ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods.
ErrNoSuchLock = errors.New("webdav: no such lock")
)
// Condition can match a WebDAV resource, based on a token or ETag.
// Exactly one of Token and ETag should be non-empty.
type Condition struct {
Not bool
Token string
ETag string
}
// LockSystem manages access to a collection of named resources. The elements
// in a lock name are separated by slash ('/', U+002F) characters, regardless
// of host operating system convention.
type LockSystem interface {
// Confirm confirms that the caller can claim all of the locks specified by
// the given conditions, and that holding the union of all of those locks
// gives exclusive access to all of the named resources. Up to two resources
// can be named. Empty names are ignored.
//
// Exactly one of release and err will be non-nil. If release is non-nil,
// all of the requested locks are held until release is called. Calling
// release does not unlock the lock, in the WebDAV UNLOCK sense, but once
// Confirm has confirmed that a lock claim is valid, that lock cannot be
// Confirmed again until it has been released.
//
// If Confirm returns ErrConfirmationFailed then the Handler will continue
// to try any other set of locks presented (a WebDAV HTTP request can
// present more than one set of locks). If it returns any other non-nil
// error, the Handler will write a "500 Internal Server Error" HTTP status.
Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error)
// Create creates a lock with the given depth, duration, owner and root
// (name). The depth will either be negative (meaning infinite) or zero.
//
// If Create returns ErrLocked then the Handler will write a "423 Locked"
// HTTP status. If it returns any other non-nil error, the Handler will
// write a "500 Internal Server Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
// when to use each error.
//
// The token returned identifies the created lock. It should be an absolute
// URI as defined by RFC 3986, Section 4.3. In particular, it should not
// contain whitespace.
Create(now time.Time, details LockDetails) (token string, err error)
// Refresh refreshes the lock with the given token.
//
// If Refresh returns ErrLocked then the Handler will write a "423 Locked"
// HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write
// a "412 Precondition Failed" HTTP Status. If it returns any other non-nil
// error, the Handler will write a "500 Internal Server Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
// when to use each error.
Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error)
// Unlock unlocks the lock with the given token.
//
// If Unlock returns ErrForbidden then the Handler will write a "403
// Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler
// will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock
// then the Handler will write a "409 Conflict" HTTP Status. If it returns
// any other non-nil error, the Handler will write a "500 Internal Server
// Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for
// when to use each error.
Unlock(now time.Time, token string) error
}
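// An illustrative sketch (not part of the original file; the function name is
// hypothetical) of how a caller might drive a LockSystem as documented above:
// create a zero-depth lock, confirm the claim with the returned token,
// release the claim, and finally unlock. Error handling is abbreviated.
//
//	func exampleLockSystemUse() error {
//		ls := NewMemLS()
//		now := time.Now()
//		token, err := ls.Create(now, LockDetails{
//			Root:      "/a/b",
//			Duration:  5 * time.Minute,
//			ZeroDepth: true,
//		})
//		if err != nil {
//			return err
//		}
//		release, err := ls.Confirm(now, "/a/b", "", Condition{Token: token})
//		if err != nil {
//			return err
//		}
//		release() // the claim must be released before it can be confirmed again
//		return ls.Unlock(now, token)
//	}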
// LockDetails are a lock's metadata.
type LockDetails struct {
// Root is the root resource name being locked. For a zero-depth lock, the
// root is the only resource being locked.
Root string
// Duration is the lock timeout. A negative duration means infinite.
Duration time.Duration
// OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request.
//
// TODO: does the "verbatim" nature play well with XML namespaces?
// Does the OwnerXML field need to have more structure? See
// https://codereview.appspot.com/175140043/#msg2
OwnerXML string
// ZeroDepth is whether the lock has zero depth. If it does not have zero
// depth, it has infinite depth.
ZeroDepth bool
}
// NewMemLS returns a new in-memory LockSystem.
func NewMemLS() LockSystem {
return &memLS{
byName: make(map[string]*memLSNode),
byToken: make(map[string]*memLSNode),
gen: uint64(time.Now().Unix()),
}
}
type memLS struct {
mu sync.Mutex
byName map[string]*memLSNode
byToken map[string]*memLSNode
gen uint64
// byExpiry only contains those nodes whose LockDetails have a finite
// Duration and are yet to expire.
byExpiry byExpiry
}
func (m *memLS) nextToken() string {
m.gen++
return strconv.FormatUint(m.gen, 10)
}
func (m *memLS) collectExpiredNodes(now time.Time) {
for len(m.byExpiry) > 0 {
if now.Before(m.byExpiry[0].expiry) {
break
}
m.remove(m.byExpiry[0])
}
}
func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
var n0, n1 *memLSNode
if name0 != "" {
if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil {
return nil, ErrConfirmationFailed
}
}
if name1 != "" {
if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil {
return nil, ErrConfirmationFailed
}
}
// Don't hold the same node twice.
if n1 == n0 {
n1 = nil
}
if n0 != nil {
m.hold(n0)
}
if n1 != nil {
m.hold(n1)
}
return func() {
m.mu.Lock()
defer m.mu.Unlock()
if n1 != nil {
m.unhold(n1)
}
if n0 != nil {
m.unhold(n0)
}
}, nil
}
// lookup returns the node n that locks the named resource, provided that n
// matches at least one of the given conditions and that lock isn't held by
// another party. Otherwise, it returns nil.
//
// n may be a parent of the named resource, if n is an infinite depth lock.
func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) {
// TODO: support Condition.Not and Condition.ETag.
for _, c := range conditions {
n = m.byToken[c.Token]
if n == nil || n.held {
continue
}
if name == n.details.Root {
return n
}
if n.details.ZeroDepth {
continue
}
if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") {
return n
}
}
return nil
}
func (m *memLS) hold(n *memLSNode) {
if n.held {
panic("webdav: memLS inconsistent held state")
}
n.held = true
if n.details.Duration >= 0 && n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
}
func (m *memLS) unhold(n *memLSNode) {
if !n.held {
panic("webdav: memLS inconsistent held state")
}
n.held = false
if n.details.Duration >= 0 {
heap.Push(&m.byExpiry, n)
}
}
func (m *memLS) Create(now time.Time, details LockDetails) (string, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
details.Root = slashClean(details.Root)
if !m.canCreate(details.Root, details.ZeroDepth) {
return "", ErrLocked
}
n := m.create(details.Root)
n.token = m.nextToken()
m.byToken[n.token] = n
n.details = details
if n.details.Duration >= 0 {
n.expiry = now.Add(n.details.Duration)
heap.Push(&m.byExpiry, n)
}
return n.token, nil
}
func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
n := m.byToken[token]
if n == nil {
return LockDetails{}, ErrNoSuchLock
}
if n.held {
return LockDetails{}, ErrLocked
}
if n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
n.details.Duration = duration
if n.details.Duration >= 0 {
n.expiry = now.Add(n.details.Duration)
heap.Push(&m.byExpiry, n)
}
return n.details, nil
}
func (m *memLS) Unlock(now time.Time, token string) error {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
n := m.byToken[token]
if n == nil {
return ErrNoSuchLock
}
if n.held {
return ErrLocked
}
m.remove(n)
return nil
}
func (m *memLS) canCreate(name string, zeroDepth bool) bool {
return walkToRoot(name, func(name0 string, first bool) bool {
n := m.byName[name0]
if n == nil {
return true
}
if first {
if n.token != "" {
// The target node is already locked.
return false
}
if !zeroDepth {
// The requested lock depth is infinite, and the fact that n exists
// (n != nil) means that a descendent of the target node is locked.
return false
}
} else if n.token != "" && !n.details.ZeroDepth {
// An ancestor of the target node is locked with infinite depth.
return false
}
return true
})
}
func (m *memLS) create(name string) (ret *memLSNode) {
walkToRoot(name, func(name0 string, first bool) bool {
n := m.byName[name0]
if n == nil {
n = &memLSNode{
details: LockDetails{
Root: name0,
},
byExpiryIndex: -1,
}
m.byName[name0] = n
}
n.refCount++
if first {
ret = n
}
return true
})
return ret
}
func (m *memLS) remove(n *memLSNode) {
delete(m.byToken, n.token)
n.token = ""
walkToRoot(n.details.Root, func(name0 string, first bool) bool {
x := m.byName[name0]
x.refCount--
if x.refCount == 0 {
delete(m.byName, name0)
}
return true
})
if n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
}
func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
for first := true; ; first = false {
if !f(name, first) {
return false
}
if name == "/" {
break
}
name = name[:strings.LastIndex(name, "/")]
if name == "" {
name = "/"
}
}
return true
}
type memLSNode struct {
// details are the lock metadata. Even if this node's name is not explicitly locked,
// details.Root will still equal the node's name.
details LockDetails
// token is the unique identifier for this node's lock. An empty token means that
// this node is not explicitly locked.
token string
// refCount is the number of self-or-descendant nodes that are explicitly locked.
refCount int
// expiry is when this node's lock expires.
expiry time.Time
// byExpiryIndex is the index of this node in memLS.byExpiry. It is -1
// if this node does not expire, or has expired.
byExpiryIndex int
// held is whether this node's lock is actively held by a Confirm call.
held bool
}
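// Editorial note (illustrative, not part of the original file): after a single
// Create(now, LockDetails{Root: "/a/b"}), byName holds three nodes, and
// refCount at each name counts the explicitly locked self-or-descendants:
//
//   "/"    -> refCount 1, token ""     (not explicitly locked)
//   "/a"   -> refCount 1, token ""     (not explicitly locked)
//   "/a/b" -> refCount 1, token "..."  (explicitly locked)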
type byExpiry []*memLSNode
func (b *byExpiry) Len() int {
return len(*b)
}
func (b *byExpiry) Less(i, j int) bool {
return (*b)[i].expiry.Before((*b)[j].expiry)
}
func (b *byExpiry) Swap(i, j int) {
(*b)[i], (*b)[j] = (*b)[j], (*b)[i]
(*b)[i].byExpiryIndex = i
(*b)[j].byExpiryIndex = j
}
func (b *byExpiry) Push(x interface{}) {
n := x.(*memLSNode)
n.byExpiryIndex = len(*b)
*b = append(*b, n)
}
func (b *byExpiry) Pop() interface{} {
i := len(*b) - 1
n := (*b)[i]
(*b)[i] = nil
n.byExpiryIndex = -1
*b = (*b)[:i]
return n
}
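// exampleByExpiryOrdering is an editorial sketch, not part of the original
// file: *byExpiry satisfies heap.Interface, so the earliest-expiring lock is
// always at index 0 and is the first to be popped off.
func exampleByExpiryOrdering() *memLSNode {
    var h byExpiry
    heap.Push(&h, &memLSNode{expiry: time.Unix(20, 0)})
    heap.Push(&h, &memLSNode{expiry: time.Unix(10, 0)})
    // Pop returns the node expiring at time.Unix(10, 0).
    return heap.Pop(&h).(*memLSNode)
}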
const infiniteTimeout = -1
// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is
// empty, an infiniteTimeout is returned.
func parseTimeout(s string) (time.Duration, error) {
if s == "" {
return infiniteTimeout, nil
}
if i := strings.IndexByte(s, ','); i >= 0 {
s = s[:i]
}
s = strings.TrimSpace(s)
if s == "Infinite" {
return infiniteTimeout, nil
}
const pre = "Second-"
if !strings.HasPrefix(s, pre) {
return 0, errInvalidTimeout
}
s = s[len(pre):]
if s == "" || s[0] < '0' || '9' < s[0] {
return 0, errInvalidTimeout
}
n, err := strconv.ParseInt(s, 10, 64)
if err != nil || 1<<32-1 < n {
return 0, errInvalidTimeout
}
return time.Duration(n) * time.Second, nil
}
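// exampleParseTimeout is an editorial sketch, not part of the original file,
// showing how parseTimeout interprets typical Timeout header values.
func exampleParseTimeout() {
    d, _ := parseTimeout("Second-3600")                // 3600 * time.Second
    d, _ = parseTimeout("Infinite, Second-4100000000") // infiniteTimeout; the fallback after the comma is ignored
    _, err := parseTimeout("Second-4294967296")        // errInvalidTimeout: exceeds 2^32-1
    _, _ = d, err
}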

View File

@ -1,735 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"fmt"
"math/rand"
"path"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"time"
)
func TestWalkToRoot(t *testing.T) {
testCases := []struct {
name string
want []string
}{{
"/a/b/c/d",
[]string{
"/a/b/c/d",
"/a/b/c",
"/a/b",
"/a",
"/",
},
}, {
"/a",
[]string{
"/a",
"/",
},
}, {
"/",
[]string{
"/",
},
}}
for _, tc := range testCases {
var got []string
if !walkToRoot(tc.name, func(name0 string, first bool) bool {
if first != (len(got) == 0) {
t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got))
return false
}
got = append(got, name0)
return true
}) {
continue
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want)
}
}
}
var lockTestDurations = []time.Duration{
infiniteTimeout, // infiniteTimeout means to never expire.
0, // A zero duration means to expire immediately.
100 * time.Hour, // A very large duration will not expire in these tests.
}
// lockTestNames are the names of a set of mutually compatible locks. For each
// name fragment:
// - _ means no explicit lock.
// - i means an infinite-depth lock.
// - z means a zero-depth lock.
var lockTestNames = []string{
"/_/_/_/_/z",
"/_/_/i",
"/_/z",
"/_/z/i",
"/_/z/z",
"/_/z/_/i",
"/_/z/_/z",
"/i",
"/z",
"/z/_/i",
"/z/_/z",
}
func lockTestZeroDepth(name string) bool {
switch name[len(name)-1] {
case 'i':
return false
case 'z':
return true
}
panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name))
}
func TestMemLSCanCreate(t *testing.T) {
now := time.Unix(0, 0)
m := NewMemLS().(*memLS)
for _, name := range lockTestNames {
_, err := m.Create(now, LockDetails{
Root: name,
Duration: infiniteTimeout,
ZeroDepth: lockTestZeroDepth(name),
})
if err != nil {
t.Fatalf("creating lock for %q: %v", name, err)
}
}
wantCanCreate := func(name string, zeroDepth bool) bool {
for _, n := range lockTestNames {
switch {
case n == name:
// An existing lock has the same name as the proposed lock.
return false
case strings.HasPrefix(n, name):
// An existing lock would be a child of the proposed lock,
// which conflicts if the proposed lock has infinite depth.
if !zeroDepth {
return false
}
case strings.HasPrefix(name, n):
// An existing lock would be an ancestor of the proposed lock,
// which conflicts if the ancestor has infinite depth.
if n[len(n)-1] == 'i' {
return false
}
}
}
return true
}
var check func(int, string)
check = func(recursion int, name string) {
for _, zeroDepth := range []bool{false, true} {
got := m.canCreate(name, zeroDepth)
want := wantCanCreate(name, zeroDepth)
if got != want {
t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want)
}
}
if recursion == 6 {
return
}
if name != "/" {
name += "/"
}
for _, c := range "_iz" {
check(recursion+1, name+string(c))
}
}
check(0, "/")
}
func TestMemLSLookup(t *testing.T) {
now := time.Unix(0, 0)
m := NewMemLS().(*memLS)
badToken := m.nextToken()
t.Logf("badToken=%q", badToken)
for _, name := range lockTestNames {
token, err := m.Create(now, LockDetails{
Root: name,
Duration: infiniteTimeout,
ZeroDepth: lockTestZeroDepth(name),
})
if err != nil {
t.Fatalf("creating lock for %q: %v", name, err)
}
t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token)
}
baseNames := append([]string{"/a", "/b/c"}, lockTestNames...)
for _, baseName := range baseNames {
for _, suffix := range []string{"", "/0", "/1/2/3"} {
name := baseName + suffix
goodToken := ""
base := m.byName[baseName]
if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) {
goodToken = base.token
}
for _, token := range []string{badToken, goodToken} {
if token == "" {
continue
}
got := m.lookup(name, Condition{Token: token})
want := base
if token == badToken {
want = nil
}
if got != want {
t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p",
name, token, token == badToken, got, want)
}
}
}
}
}
func TestMemLSConfirm(t *testing.T) {
now := time.Unix(0, 0)
m := NewMemLS().(*memLS)
alice, err := m.Create(now, LockDetails{
Root: "/alice",
Duration: infiniteTimeout,
ZeroDepth: false,
})
if err != nil {
t.Fatalf("Create: %v", err)
}
tweedle, err := m.Create(now, LockDetails{
Root: "/tweedle",
Duration: infiniteTimeout,
ZeroDepth: false,
})
if err != nil {
t.Fatalf("Create: %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Create: inconsistent state: %v", err)
}
// Test a mismatch between name and condition.
_, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice})
if err != ErrConfirmationFailed {
t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Confirm (mismatch): inconsistent state: %v", err)
}
// Test two names (that fall under the same lock) in the one Confirm call.
release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle})
if err != nil {
t.Fatalf("Confirm (twins): %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Confirm (twins): inconsistent state: %v", err)
}
release()
if err := m.consistent(); err != nil {
t.Fatalf("release (twins): inconsistent state: %v", err)
}
// Test the same two names in overlapping Confirm / release calls.
releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle})
if err != nil {
t.Fatalf("Confirm (sequence #0): %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err)
}
_, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle})
if err != ErrConfirmationFailed {
t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err)
}
releaseDee()
if err := m.consistent(); err != nil {
t.Fatalf("release (sequence #2): inconsistent state: %v", err)
}
releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle})
if err != nil {
t.Fatalf("Confirm (sequence #3): %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err)
}
// Test that you can't unlock a held lock.
err = m.Unlock(now, tweedle)
if err != ErrLocked {
t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err)
}
releaseDum()
if err := m.consistent(); err != nil {
t.Fatalf("release (sequence #5): inconsistent state: %v", err)
}
err = m.Unlock(now, tweedle)
if err != nil {
t.Fatalf("Unlock (sequence #6): %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err)
}
}
func TestMemLSNonCanonicalRoot(t *testing.T) {
now := time.Unix(0, 0)
m := NewMemLS().(*memLS)
token, err := m.Create(now, LockDetails{
Root: "/foo/./bar//",
Duration: 1 * time.Second,
})
if err != nil {
t.Fatalf("Create: %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Create: inconsistent state: %v", err)
}
if err := m.Unlock(now, token); err != nil {
t.Fatalf("Unlock: %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Unlock: inconsistent state: %v", err)
}
}
func TestMemLSExpiry(t *testing.T) {
m := NewMemLS().(*memLS)
testCases := []string{
"setNow 0",
"create /a.5",
"want /a.5",
"create /c.6",
"want /a.5 /c.6",
"create /a/b.7",
"want /a.5 /a/b.7 /c.6",
"setNow 4",
"want /a.5 /a/b.7 /c.6",
"setNow 5",
"want /a/b.7 /c.6",
"setNow 6",
"want /a/b.7",
"setNow 7",
"want ",
"setNow 8",
"want ",
"create /a.12",
"create /b.13",
"create /c.15",
"create /a/d.16",
"want /a.12 /a/d.16 /b.13 /c.15",
"refresh /a.14",
"want /a.14 /a/d.16 /b.13 /c.15",
"setNow 12",
"want /a.14 /a/d.16 /b.13 /c.15",
"setNow 13",
"want /a.14 /a/d.16 /c.15",
"setNow 14",
"want /a/d.16 /c.15",
"refresh /a/d.20",
"refresh /c.20",
"want /a/d.20 /c.20",
"setNow 20",
"want ",
}
tokens := map[string]string{}
zTime := time.Unix(0, 0)
now := zTime
for i, tc := range testCases {
j := strings.IndexByte(tc, ' ')
if j < 0 {
t.Fatalf("test case #%d %q: invalid command", i, tc)
}
op, arg := tc[:j], tc[j+1:]
switch op {
default:
t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)
case "create", "refresh":
parts := strings.Split(arg, ".")
if len(parts) != 2 {
t.Fatalf("test case #%d %q: invalid create", i, tc)
}
root := parts[0]
d, err := strconv.Atoi(parts[1])
if err != nil {
t.Fatalf("test case #%d %q: invalid duration", i, tc)
}
dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now)
switch op {
case "create":
token, err := m.Create(now, LockDetails{
Root: root,
Duration: dur,
ZeroDepth: true,
})
if err != nil {
t.Fatalf("test case #%d %q: Create: %v", i, tc, err)
}
tokens[root] = token
case "refresh":
token := tokens[root]
if token == "" {
t.Fatalf("test case #%d %q: no token for %q", i, tc, root)
}
got, err := m.Refresh(now, token, dur)
if err != nil {
t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err)
}
want := LockDetails{
Root: root,
Duration: dur,
ZeroDepth: true,
}
if got != want {
t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want)
}
}
case "setNow":
d, err := strconv.Atoi(arg)
if err != nil {
t.Fatalf("test case #%d %q: invalid duration", i, tc)
}
now = time.Unix(0, 0).Add(time.Duration(d) * time.Second)
case "want":
m.mu.Lock()
m.collectExpiredNodes(now)
got := make([]string, 0, len(m.byToken))
for _, n := range m.byToken {
got = append(got, fmt.Sprintf("%s.%d",
n.details.Root, n.expiry.Sub(zTime)/time.Second))
}
m.mu.Unlock()
sort.Strings(got)
want := []string{}
if arg != "" {
want = strings.Split(arg, " ")
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want)
}
}
if err := m.consistent(); err != nil {
t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err)
}
}
}
func TestMemLS(t *testing.T) {
now := time.Unix(0, 0)
m := NewMemLS().(*memLS)
rng := rand.New(rand.NewSource(0))
tokens := map[string]string{}
nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0
const N = 2000
for i := 0; i < N; i++ {
name := lockTestNames[rng.Intn(len(lockTestNames))]
duration := lockTestDurations[rng.Intn(len(lockTestDurations))]
confirmed, unlocked := false, false
// If the name was already locked, we randomly confirm/release, refresh
// or unlock it. Otherwise, we create a lock.
token := tokens[name]
if token != "" {
switch rng.Intn(3) {
case 0:
confirmed = true
nConfirm++
release, err := m.Confirm(now, name, "", Condition{Token: token})
if err != nil {
t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err)
}
if err := m.consistent(); err != nil {
t.Fatalf("iteration #%d: inconsistent state: %v", i, err)
}
release()
case 1:
nRefresh++
if _, err := m.Refresh(now, token, duration); err != nil {
t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err)
}
case 2:
unlocked = true
nUnlock++
if err := m.Unlock(now, token); err != nil {
t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err)
}
}
} else {
nCreate++
var err error
token, err = m.Create(now, LockDetails{
Root: name,
Duration: duration,
ZeroDepth: lockTestZeroDepth(name),
})
if err != nil {
t.Fatalf("iteration #%d: Create %q: %v", i, name, err)
}
}
if !confirmed {
if duration == 0 || unlocked {
// A zero-duration lock should expire immediately and is
// effectively equivalent to being unlocked.
tokens[name] = ""
} else {
tokens[name] = token
}
}
if err := m.consistent(); err != nil {
t.Fatalf("iteration #%d: inconsistent state: %v", i, err)
}
}
if nConfirm < N/10 {
t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10)
}
if nCreate < N/10 {
t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10)
}
if nRefresh < N/10 {
t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10)
}
if nUnlock < N/10 {
t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10)
}
}
func (m *memLS) consistent() error {
m.mu.Lock()
defer m.mu.Unlock()
// If m.byName is non-empty, then it must contain an entry for the root "/",
// and its refCount should equal the number of locked nodes.
if len(m.byName) > 0 {
n := m.byName["/"]
if n == nil {
return fmt.Errorf(`non-empty m.byName does not contain the root "/"`)
}
if n.refCount != len(m.byToken) {
return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken))
}
}
for name, n := range m.byName {
// The map keys should be consistent with the node's copy of the key.
if n.details.Root != name {
return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name)
}
// A name must be clean, and start with a "/".
if len(name) == 0 || name[0] != '/' {
return fmt.Errorf(`node name %q does not start with "/"`, name)
}
if name != path.Clean(name) {
return fmt.Errorf(`node name %q is not clean`, name)
}
// A node's refCount should be positive.
if n.refCount <= 0 {
return fmt.Errorf("non-positive refCount for node at name %q", name)
}
// A node's refCount should be the number of self-or-descendants that
// are locked (i.e. have a non-empty token).
var list []string
for name0, n0 := range m.byName {
// All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z',
// so strings.HasPrefix is equivalent to a self-or-descendant name match.
// We don't have to worry about "/foo/bar" being a false positive match
// for "/foo/b".
if strings.HasPrefix(name0, name) && n0.token != "" {
list = append(list, name0)
}
}
if n.refCount != len(list) {
sort.Strings(list)
return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)",
name, n.refCount, list, len(list))
}
// A node n is in m.byToken if it has a non-empty token.
if n.token != "" {
if _, ok := m.byToken[n.token]; !ok {
return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token)
}
}
// A node n is in m.byExpiry if it has a non-negative byExpiryIndex.
if n.byExpiryIndex >= 0 {
if n.byExpiryIndex >= len(m.byExpiry) {
return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry))
}
if n != m.byExpiry[n.byExpiryIndex] {
return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex)
}
}
}
for token, n := range m.byToken {
// The map keys should be consistent with the node's copy of the key.
if n.token != token {
return fmt.Errorf("node token %q != byToken map key %q", n.token, token)
}
// Every node in m.byToken is in m.byName.
if _, ok := m.byName[n.details.Root]; !ok {
return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root)
}
}
for i, n := range m.byExpiry {
// The slice indices should be consistent with the node's copy of the index.
if n.byExpiryIndex != i {
return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i)
}
// Every node in m.byExpiry is in m.byName.
if _, ok := m.byName[n.details.Root]; !ok {
return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root)
}
// No node in m.byExpiry should be held.
if n.held {
return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root)
}
}
return nil
}
func TestParseTimeout(t *testing.T) {
testCases := []struct {
s string
want time.Duration
wantErr error
}{{
"",
infiniteTimeout,
nil,
}, {
"Infinite",
infiniteTimeout,
nil,
}, {
"Infinitesimal",
0,
errInvalidTimeout,
}, {
"infinite",
0,
errInvalidTimeout,
}, {
"Second-0",
0 * time.Second,
nil,
}, {
"Second-123",
123 * time.Second,
nil,
}, {
" Second-456 ",
456 * time.Second,
nil,
}, {
"Second-4100000000",
4100000000 * time.Second,
nil,
}, {
"junk",
0,
errInvalidTimeout,
}, {
"Second-",
0,
errInvalidTimeout,
}, {
"Second--1",
0,
errInvalidTimeout,
}, {
"Second--123",
0,
errInvalidTimeout,
}, {
"Second-+123",
0,
errInvalidTimeout,
}, {
"Second-0x123",
0,
errInvalidTimeout,
}, {
"second-123",
0,
errInvalidTimeout,
}, {
"Second-4294967295",
4294967295 * time.Second,
nil,
}, {
// Section 10.7 says that "The timeout value for TimeType "Second"
// must not be greater than 2^32-1."
"Second-4294967296",
0,
errInvalidTimeout,
}, {
// This test case comes from section 9.10.9 of the spec. It says,
//
// "In this request, the client has specified that it desires an
// infinite-length lock, if available, otherwise a timeout of 4.1
// billion seconds, if available."
//
// The Go WebDAV package always supports infinite length locks,
// and ignores the fallback after the comma.
"Infinite, Second-4100000000",
infiniteTimeout,
nil,
}}
for _, tc := range testCases {
got, gotErr := parseTimeout(tc.s)
if got != tc.want || gotErr != tc.wantErr {
t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr)
}
}
}

View File

@ -14,6 +14,7 @@ import (
"mime"
"net/http"
"os"
"path"
"path/filepath"
"strconv"
)
@ -103,7 +104,7 @@ type DeadPropsHolder interface {
var liveProps = map[xml.Name]struct {
// findFn implements the propfind function of this property. If nil,
// it indicates a hidden property.
findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error)
findFn func(context.Context, FileSystem, string, os.FileInfo) (string, error)
// dir is true if the property applies to directories.
dir bool
}{
@ -150,14 +151,6 @@ var liveProps = map[xml.Name]struct {
// collections.
dir: false,
},
// TODO: The lockdiscovery property requires LockSystem to list the
// active locks on a resource.
{Space: "DAV:", Local: "lockdiscovery"}: {},
{Space: "DAV:", Local: "supportedlock"}: {
findFn: findSupportedLock,
dir: true,
},
}
// TODO(nigeltao) merge props and allprop?
@ -166,7 +159,7 @@ var liveProps = map[xml.Name]struct {
//
// Each Propstat has a unique status and each property name will only be part
// of one Propstat element.
func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) {
func props(ctx context.Context, fs FileSystem, name string, pnames []xml.Name) ([]Propstat, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
@ -196,7 +189,7 @@ func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pname
}
// Otherwise, it must either be a live property or we don't know it.
if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {
innerXML, err := prop.findFn(ctx, fs, ls, name, fi)
innerXML, err := prop.findFn(ctx, fs, name, fi)
if err != nil {
return nil, err
}
@ -214,7 +207,7 @@ func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pname
}
// Propnames returns the property names defined for resource name.
func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
func propnames(ctx context.Context, fs FileSystem, name string) ([]xml.Name, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
@ -254,8 +247,8 @@ func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) (
// returned if they are named in 'include'.
//
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
pnames, err := propnames(ctx, fs, ls, name)
func allprop(ctx context.Context, fs FileSystem, name string, include []xml.Name) ([]Propstat, error) {
pnames, err := propnames(ctx, fs, name)
if err != nil {
return nil, err
}
@ -269,71 +262,7 @@ func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, inc
pnames = append(pnames, pn)
}
}
return props(ctx, fs, ls, name, pnames)
}
// Patch patches the properties of resource name. The return values are
// constrained in the same manner as DeadPropsHolder.Patch.
func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
conflict := false
loop:
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
conflict = true
break loop
}
}
}
if conflict {
pstatForbidden := Propstat{
Status: http.StatusForbidden,
XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
}
pstatFailedDep := Propstat{
Status: StatusFailedDependency,
}
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
} else {
pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
}
}
}
return makePropstats(pstatForbidden, pstatFailedDep), nil
}
f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)
if err != nil {
return nil, err
}
defer f.Close()
if dph, ok := f.(DeadPropsHolder); ok {
ret, err := dph.Patch(patches)
if err != nil {
return nil, err
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
// "The contents of the prop XML element must only list the names of
// properties to which the result in the status element applies."
for _, pstat := range ret {
for i, p := range pstat.Props {
pstat.Props[i] = Property{XMLName: p.XMLName}
}
}
return ret, nil
}
// The file doesn't implement the optional DeadPropsHolder interface, so
// all patches are forbidden.
pstat := Propstat{Status: http.StatusForbidden}
for _, patch := range patches {
for _, p := range patch.Props {
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
}
}
return []Propstat{pstat}, nil
return props(ctx, fs, name, pnames)
}
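// Editorial note (illustrative, not part of the diff): with the LockSystem
// parameter removed, callers now pass only the FileSystem. For example, the
// PROPFIND handler resolves an allprop request for a single resource as:
//
//   pstats, err := allprop(ctx, h.FileSystem, reqPath, pf.Prop)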
func escapeXML(s string) string {
@ -356,26 +285,26 @@ func escapeXML(s string) string {
return s
}
func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
func findResourceType(ctx context.Context, fs FileSystem, name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
return `<D:collection xmlns:D="DAV:"/>`, nil
}
return "", nil
}
func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if slashClean(name) == "/" {
func findDisplayName(ctx context.Context, fs FileSystem, name string, fi os.FileInfo) (string, error) {
if path.Clean(name) == "/" {
// Hide the real name of a possibly prefixed root directory.
return "", nil
}
return escapeXML(fi.Name()), nil
}
func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
func findContentLength(ctx context.Context, fs FileSystem, name string, fi os.FileInfo) (string, error) {
return strconv.FormatInt(fi.Size(), 10), nil
}
func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
func findLastModified(ctx context.Context, fs FileSystem, name string, fi os.FileInfo) (string, error) {
return fi.ModTime().UTC().Format(http.TimeFormat), nil
}
@ -400,7 +329,7 @@ type ContentTyper interface {
ContentType(ctx context.Context) (string, error)
}
func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
func findContentType(ctx context.Context, fs FileSystem, name string, fi os.FileInfo) (string, error) {
if do, ok := fi.(ContentTyper); ok {
ctype, err := do.ContentType(ctx)
if err != ErrNotImplemented {
@ -447,7 +376,7 @@ type ETager interface {
ETag(ctx context.Context) (string, error)
}
func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
func findETag(ctx context.Context, fs FileSystem, name string, fi os.FileInfo) (string, error) {
if do, ok := fi.(ETager); ok {
etag, err := do.ETag(ctx)
if err != ErrNotImplemented {
@ -458,12 +387,4 @@ func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi
// modification time and size of a file. We replicate the heuristic
// with nanosecond granularity.
return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
}
func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return `` +
`<D:lockentry xmlns:D="DAV:">` +
`<D:lockscope><D:exclusive/></D:lockscope>` +
`<D:locktype><D:write/></D:locktype>` +
`</D:lockentry>`, nil
}
}

View File

@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package webdav provides a WebDAV server implementation.
package webdav // import "golang.org/x/net/webdav"
package webdav
import (
"errors"
@ -14,7 +14,6 @@ import (
"os"
"path"
"strings"
"time"
)
type Handler struct {
@ -22,8 +21,6 @@ type Handler struct {
Prefix string
// FileSystem is the virtual file system.
FileSystem FileSystem
// LockSystem is the lock management system.
LockSystem LockSystem
// Logger is an optional error logger. If non-nil, it will be called
// for all HTTP requests.
Logger func(*http.Request, error)
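// Editorial sketch (illustrative, not part of the diff): after this change a
// Handler is configured without a LockSystem. A hypothetical setup that
// serves the package's Dir filesystem under a URL prefix could look like:
//
//   http.Handle("/dav/", &Handler{
//       Prefix:     "/dav/",
//       FileSystem: Dir("/srv/dav"),
//       Logger: func(r *http.Request, err error) {
//           if err != nil {
//               log.Printf("webdav: %s %s: %v", r.Method, r.URL.Path, err)
//           }
//       },
//   })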
@ -43,8 +40,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
status, err := http.StatusBadRequest, errUnsupportedMethod
if h.FileSystem == nil {
status, err = http.StatusInternalServerError, errNoFileSystem
} else if h.LockSystem == nil {
status, err = http.StatusInternalServerError, errNoLockSystem
} else {
switch r.Method {
case "OPTIONS":
@ -59,21 +54,15 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
status, err = h.handleMkcol(w, r)
case "COPY", "MOVE":
status, err = h.handleCopyMove(w, r)
case "LOCK":
status, err = h.handleLock(w, r)
case "UNLOCK":
status, err = h.handleUnlock(w, r)
case "PROPFIND":
status, err = h.handlePropfind(w, r)
case "PROPPATCH":
status, err = h.handleProppatch(w, r)
}
}
if status != 0 {
w.WriteHeader(status)
if status != http.StatusNoContent {
w.Write([]byte(StatusText(status)))
w.Write([]byte(http.StatusText(status)))
}
}
if h.Logger != nil {
@ -81,106 +70,18 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) {
token, err = h.LockSystem.Create(now, LockDetails{
Root: root,
Duration: infiniteTimeout,
ZeroDepth: true,
})
if err != nil {
if err == ErrLocked {
return "", StatusLocked, err
}
return "", http.StatusInternalServerError, err
}
return token, 0, nil
}
func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) {
hdr := r.Header.Get("If")
if hdr == "" {
// An empty If header means that the client hasn't previously created locks.
// Even if this client doesn't care about locks, we still need to check that
// the resources aren't locked by another client, so we create temporary
// locks that would conflict with another client's locks. These temporary
// locks are unlocked at the end of the HTTP request.
now, srcToken, dstToken := time.Now(), "", ""
if src != "" {
srcToken, status, err = h.lock(now, src)
if err != nil {
return nil, status, err
}
}
if dst != "" {
dstToken, status, err = h.lock(now, dst)
if err != nil {
if srcToken != "" {
h.LockSystem.Unlock(now, srcToken)
}
return nil, status, err
}
}
return func() {
if dstToken != "" {
h.LockSystem.Unlock(now, dstToken)
}
if srcToken != "" {
h.LockSystem.Unlock(now, srcToken)
}
}, 0, nil
}
ih, ok := parseIfHeader(hdr)
if !ok {
return nil, http.StatusBadRequest, errInvalidIfHeader
}
// ih is a disjunction (OR) of ifLists, so any ifList will do.
for _, l := range ih.lists {
lsrc := l.resourceTag
if lsrc == "" {
lsrc = src
} else {
u, err := url.Parse(lsrc)
if err != nil {
continue
}
if u.Host != r.Host {
continue
}
lsrc, status, err = h.stripPrefix(u.Path)
if err != nil {
return nil, status, err
}
}
release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...)
if err == ErrConfirmationFailed {
continue
}
if err != nil {
return nil, http.StatusInternalServerError, err
}
return release, 0, nil
}
// Section 10.4.1 says that "If this header is evaluated and all state lists
// fail, then the request must fail with a 412 (Precondition Failed) status."
// We follow the spec even though the cond_put_corrupt_token test case from
// the litmus test warns on seeing a 412 instead of a 423 (Locked).
return nil, http.StatusPreconditionFailed, ErrLocked
}
func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ctx := r.Context()
allow := "OPTIONS, LOCK, PUT, MKCOL"
allow := "OPTIONS, PUT, MKCOL"
if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil {
if fi.IsDir() {
allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND"
allow = "OPTIONS, DELETE, COPY, MOVE, PROPFIND"
} else {
allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT"
allow = "OPTIONS, GET, HEAD, POST, DELETE, COPY, MOVE, PROPFIND, PUT"
}
}
w.Header().Set("Allow", allow)
@ -196,7 +97,6 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
if err != nil {
return status, err
}
// TODO: check locks for read-only access??
ctx := r.Context()
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0)
if err != nil {
@ -210,7 +110,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta
if fi.IsDir() {
return http.StatusMethodNotAllowed, nil
}
etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
etag, err := findETag(ctx, h.FileSystem, reqPath, fi)
if err != nil {
return http.StatusInternalServerError, err
}
@ -225,11 +125,6 @@ func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status i
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := r.Context()
@ -255,13 +150,6 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
// TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz'
// comments in http.checkEtag.
ctx := r.Context()
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
@ -281,7 +169,7 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
if closeErr != nil {
return http.StatusMethodNotAllowed, closeErr
}
etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
etag, err := findETag(ctx, h.FileSystem, reqPath, fi)
if err != nil {
return http.StatusInternalServerError, err
}
@ -294,12 +182,6 @@ func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status in
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := r.Context()
if r.ContentLength > 0 {
@ -347,17 +229,6 @@ func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status
ctx := r.Context()
if r.Method == "COPY" {
// Section 7.5.1 says that a COPY only needs to lock the destination, not
// both destination and source. Strictly speaking, this is racy: even though
// a COPY doesn't modify the source, a concurrent operation might modify it
// mid-copy. However, the litmus test explicitly checks that COPYing a
// locked-by-another source is OK.
release, status, err := h.confirmLocks(r, "", dst)
if err != nil {
return status, err
}
defer release()
// Section 9.8.3 says that "The COPY method on a collection without a Depth
// header must act as if a Depth header with value "infinity" was included".
depth := infiniteDepth
@ -372,12 +243,6 @@ func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status
return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0)
}
release, status, err := h.confirmLocks(r, src, dst)
if err != nil {
return status, err
}
defer release()
// Section 9.9.2 says that "The MOVE method on a collection must act as if
// a "Depth: infinity" header was used on it. A client must not submit a
// Depth header on a MOVE on a collection with any value but "infinity"."
@ -389,123 +254,6 @@ func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status
return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T")
}
func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) {
duration, err := parseTimeout(r.Header.Get("Timeout"))
if err != nil {
return http.StatusBadRequest, err
}
li, status, err := readLockInfo(r.Body)
if err != nil {
return status, err
}
ctx := r.Context()
token, ld, now, created := "", LockDetails{}, time.Now(), false
if li == (lockInfo{}) {
// An empty lockInfo means to refresh the lock.
ih, ok := parseIfHeader(r.Header.Get("If"))
if !ok {
return http.StatusBadRequest, errInvalidIfHeader
}
if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 {
token = ih.lists[0].conditions[0].Token
}
if token == "" {
return http.StatusBadRequest, errInvalidLockToken
}
ld, err = h.LockSystem.Refresh(now, token, duration)
if err != nil {
if err == ErrNoSuchLock {
return http.StatusPreconditionFailed, err
}
return http.StatusInternalServerError, err
}
} else {
// Section 9.10.3 says that "If no Depth header is submitted on a LOCK request,
// then the request MUST act as if a "Depth:infinity" had been submitted."
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth != 0 && depth != infiniteDepth {
// Section 9.10.3 says that "Values other than 0 or infinity must not be
// used with the Depth header on a LOCK method".
return http.StatusBadRequest, errInvalidDepth
}
}
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ld = LockDetails{
Root: reqPath,
Duration: duration,
OwnerXML: li.Owner.InnerXML,
ZeroDepth: depth == 0,
}
token, err = h.LockSystem.Create(now, ld)
if err != nil {
if err == ErrLocked {
return StatusLocked, err
}
return http.StatusInternalServerError, err
}
defer func() {
if retErr != nil {
h.LockSystem.Unlock(now, token)
}
}()
// Create the resource if it didn't previously exist.
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
// TODO: detect missing intermediate dirs and return http.StatusConflict?
return http.StatusInternalServerError, err
}
f.Close()
created = true
}
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
// Lock-Token value is a Coded-URL. We add angle brackets.
w.Header().Set("Lock-Token", "<"+token+">")
}
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
if created {
// This is "w.WriteHeader(http.StatusCreated)" and not "return
// http.StatusCreated, nil" because we write our own (XML) response to w
// and Handler.ServeHTTP would otherwise write "Created".
w.WriteHeader(http.StatusCreated)
}
writeLockInfo(w, token, ld)
return 0, nil
}
func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) {
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
// Lock-Token value is a Coded-URL. We strip its angle brackets.
t := r.Header.Get("Lock-Token")
if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' {
return http.StatusBadRequest, errInvalidLockToken
}
t = t[1 : len(t)-1]
switch err = h.LockSystem.Unlock(time.Now(), t); err {
case nil:
return http.StatusNoContent, err
case ErrForbidden:
return http.StatusForbidden, err
case ErrLocked:
return StatusLocked, err
case ErrNoSuchLock:
return http.StatusConflict, err
default:
return http.StatusInternalServerError, err
}
}
func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
@ -539,7 +287,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status
}
var pstats []Propstat
if pf.Propname != nil {
pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath)
pnames, err := propnames(ctx, h.FileSystem, reqPath)
if err != nil {
return err
}
@ -549,9 +297,9 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status
}
pstats = append(pstats, pstat)
} else if pf.Allprop != nil {
pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
pstats, err = allprop(ctx, h.FileSystem, reqPath, pf.Prop)
} else {
pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
pstats, err = props(ctx, h.FileSystem, reqPath, pf.Prop)
}
if err != nil {
return err
@ -574,45 +322,6 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status
return 0, nil
}
func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := r.Context()
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusMethodNotAllowed, err
}
patches, status, err := readProppatch(r.Body)
if err != nil {
return status, err
}
pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches)
if err != nil {
return http.StatusInternalServerError, err
}
mw := multistatusWriter{w: w}
writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats))
closeErr := mw.close()
if writeErr != nil {
return http.StatusInternalServerError, writeErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
return 0, nil
}
func makePropstatResponse(href string, pstats []Propstat) *response {
resp := response{
Href: []string{(&url.URL{Path: href}).EscapedPath()},
@ -624,7 +333,7 @@ func makePropstatResponse(href string, pstats []Propstat) *response {
xmlErr = &xmlError{InnerXML: []byte(p.XMLError)}
}
resp.Propstat = append(resp.Propstat, propstat{
Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)),
Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, http.StatusText(p.Status)),
Prop: p.Props,
ResponseDescription: p.ResponseDescription,
Error: xmlErr,
@ -645,7 +354,6 @@ const (
// - PROPFIND has no further restrictions, as per section 9.1.
// - COPY accepts only "0" or "infinity", as per section 9.8.3.
// - MOVE accepts only "infinity", as per section 9.9.2.
// - LOCK accepts only "0" or "infinity", as per section 9.10.3.
// These constraints are enforced by the handleXxx methods.
func parseDepth(s string) int {
switch s {
@ -654,53 +362,22 @@ func parseDepth(s string) int {
case "1":
return 1
case "infinity":
return infiniteDepth
// Depth "infinity" is rejected for security reasons.
return invalidDepth
}
return invalidDepth
}
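// Editorial note (illustrative, not part of the diff): with this change an
// explicit Depth of "0" or "1" is still honoured, while "infinity" (and any
// other value) now parses to invalidDepth and is treated as invalid by the
// callers.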
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
const (
StatusMulti = 207
StatusUnprocessableEntity = 422
StatusLocked = 423
StatusFailedDependency = 424
StatusInsufficientStorage = 507
)
func StatusText(code int) string {
switch code {
case StatusMulti:
return "Multi-Status"
case StatusUnprocessableEntity:
return "Unprocessable Entity"
case StatusLocked:
return "Locked"
case StatusFailedDependency:
return "Failed Dependency"
case StatusInsufficientStorage:
return "Insufficient Storage"
}
return http.StatusText(code)
}
var (
errDestinationEqualsSource = errors.New("webdav: destination equals source")
errDirectoryNotEmpty = errors.New("webdav: directory not empty")
errInvalidDepth = errors.New("webdav: invalid depth")
errInvalidDestination = errors.New("webdav: invalid destination")
errInvalidIfHeader = errors.New("webdav: invalid If header")
errInvalidLockInfo = errors.New("webdav: invalid lock info")
errInvalidLockToken = errors.New("webdav: invalid lock token")
errInvalidPropfind = errors.New("webdav: invalid propfind")
errInvalidProppatch = errors.New("webdav: invalid proppatch")
errInvalidResponse = errors.New("webdav: invalid response")
errInvalidTimeout = errors.New("webdav: invalid timeout")
errNoFileSystem = errors.New("webdav: no file system")
errNoLockSystem = errors.New("webdav: no lock system")
errNotADirectory = errors.New("webdav: not a directory")
errPrefixMismatch = errors.New("webdav: prefix mismatch")
errRecursionTooDeep = errors.New("webdav: recursion too deep")
errUnsupportedLockInfo = errors.New("webdav: unsupported lock info")
errUnsupportedMethod = errors.New("webdav: unsupported method")
)

View File

@ -13,63 +13,8 @@ import (
"fmt"
"io"
"net/http"
"time"
// As of https://go-review.googlesource.com/#/c/12772/ which was submitted
// in July 2015, this package uses an internal fork of the standard
// library's encoding/xml package, due to changes in the way namespaces
// were encoded. Such changes were introduced in the Go 1.5 cycle, but were
// rolled back in response to https://github.com/golang/go/issues/11841
//
// However, this package's exported API, specifically the Property and
// DeadPropsHolder types, need to refer to the standard library's version
// of the xml.Name type, as code that imports this package cannot refer to
// the internal version.
//
// This file therefore imports both the internal and external versions, as
// ixml and xml, and converts between them.
//
// In the long term, this package should use the standard library's version
// only, and the internal fork deleted, once
// https://github.com/golang/go/issues/13400 is resolved.
ixml "golang.org/x/net/webdav/internal/xml"
)
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo
type lockInfo struct {
XMLName ixml.Name `xml:"lockinfo"`
Exclusive *struct{} `xml:"lockscope>exclusive"`
Shared *struct{} `xml:"lockscope>shared"`
Write *struct{} `xml:"locktype>write"`
Owner owner `xml:"owner"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
type owner struct {
InnerXML string `xml:",innerxml"`
}
func readLockInfo(r io.Reader) (li lockInfo, status int, err error) {
c := &countingReader{r: r}
if err = ixml.NewDecoder(c).Decode(&li); err != nil {
if err == io.EOF {
if c.n == 0 {
// An empty body means to refresh the lock.
// http://www.webdav.org/specs/rfc4918.html#refreshing-locks
return lockInfo{}, 0, nil
}
err = errInvalidLockInfo
}
return lockInfo{}, http.StatusBadRequest, err
}
// We only support exclusive (non-shared) write locks. In practice, these are
// the only types of locks that seem to matter.
if li.Exclusive == nil || li.Shared != nil || li.Write == nil {
return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo
}
return li, 0, nil
}
type countingReader struct {
n int
r io.Reader
@ -81,32 +26,12 @@ func (c *countingReader) Read(p []byte) (int, error) {
return n, err
}
func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {
depth := "infinity"
if ld.ZeroDepth {
depth = "0"
}
timeout := ld.Duration / time.Second
return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+
"<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+
" <D:locktype><D:write/></D:locktype>\n"+
" <D:lockscope><D:exclusive/></D:lockscope>\n"+
" <D:depth>%s</D:depth>\n"+
" <D:owner>%s</D:owner>\n"+
" <D:timeout>Second-%d</D:timeout>\n"+
" <D:locktoken><D:href>%s</D:href></D:locktoken>\n"+
" <D:lockroot><D:href>%s</D:href></D:lockroot>\n"+
"</D:activelock></D:lockdiscovery></D:prop>",
depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),
)
}
func escape(s string) string {
for i := 0; i < len(s); i++ {
switch s[i] {
case '"', '&', '\'', '<', '>':
b := bytes.NewBuffer(nil)
ixml.EscapeText(b, []byte(s))
xml.EscapeText(b, []byte(s))
return b.String()
}
}
@ -118,14 +43,14 @@ func escape(s string) string {
// and directives.
// http://www.webdav.org/specs/rfc4918.html#property_values
// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
func next(d *ixml.Decoder) (ixml.Token, error) {
func next(d *xml.Decoder) (xml.Token, error) {
for {
t, err := d.Token()
if err != nil {
return t, err
}
switch t.(type) {
case ixml.Comment, ixml.Directive, ixml.ProcInst:
case xml.Comment, xml.Directive, xml.ProcInst:
continue
default:
return t, nil
@ -140,25 +65,25 @@ type propfindProps []xml.Name
//
// It returns an error if start does not contain any properties or if
// properties contain values. Character data between properties is ignored.
func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
func (pn *propfindProps) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
for {
t, err := next(d)
if err != nil {
return err
}
switch t.(type) {
case ixml.EndElement:
case xml.EndElement:
if len(*pn) == 0 {
return fmt.Errorf("%s must not be empty", start.Name.Local)
}
return nil
case ixml.StartElement:
name := t.(ixml.StartElement).Name
case xml.StartElement:
name := t.(xml.StartElement).Name
t, err = next(d)
if err != nil {
return err
}
if _, ok := t.(ixml.EndElement); !ok {
if _, ok := t.(xml.EndElement); !ok {
return fmt.Errorf("unexpected token %T", t)
}
*pn = append(*pn, xml.Name(name))
@ -168,7 +93,7 @@ func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement)
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind
type propfind struct {
XMLName ixml.Name `xml:"DAV: propfind"`
XMLName xml.Name `xml:"DAV: propfind"`
Allprop *struct{} `xml:"DAV: allprop"`
Propname *struct{} `xml:"DAV: propname"`
Prop propfindProps `xml:"DAV: prop"`
@ -177,7 +102,7 @@ type propfind struct {
func readPropfind(r io.Reader) (pf propfind, status int, err error) {
c := countingReader{r: r}
if err = ixml.NewDecoder(&c).Decode(&pf); err != nil {
if err = xml.NewDecoder(&c).Decode(&pf); err != nil {
if err == io.EOF {
if c.n == 0 {
// An empty body means to propfind allprop.
@ -224,10 +149,10 @@ type Property struct {
InnerXML []byte `xml:",innerxml"`
}
// ixmlProperty is the same as the Property type except it holds an ixml.Name
// instead of an xml.Name.
// xmlProperty is the internal counterpart of the Property type used when
// marshalling propstat elements.
type ixmlProperty struct {
XMLName ixml.Name
type xmlProperty struct {
XMLName xml.Name
Lang string `xml:"xml:lang,attr,omitempty"`
InnerXML []byte `xml:",innerxml"`
}
@ -235,7 +160,7 @@ type ixmlProperty struct {
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error
// See multistatusWriter for the "D:" namespace prefix.
type xmlError struct {
XMLName ixml.Name `xml:"D:error"`
XMLName xml.Name `xml:"D:error"`
InnerXML []byte `xml:",innerxml"`
}
@ -248,10 +173,10 @@ type propstat struct {
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// ixmlPropstat is the same as the propstat type except it holds an ixml.Name
// instead of an xml.Name.
// xmlPropstat is the internal counterpart of the propstat type used when
// marshalling propstat elements; it holds xmlProperty values.
type ixmlPropstat struct {
Prop []ixmlProperty `xml:"D:prop>_ignored_"`
type xmlPropstat struct {
Prop []xmlProperty `xml:"D:prop>_ignored_"`
Status string `xml:"D:status"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
@ -259,37 +184,37 @@ type ixmlPropstat struct {
// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace
// before encoding. See multistatusWriter.
func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error {
// Convert from a propstat to an ixmlPropstat.
ixmlPs := ixmlPropstat{
Prop: make([]ixmlProperty, len(ps.Prop)),
func (ps propstat) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// Convert from a propstat to an xmlPropstat.
xmlPs := xmlPropstat{
Prop: make([]xmlProperty, len(ps.Prop)),
Status: ps.Status,
Error: ps.Error,
ResponseDescription: ps.ResponseDescription,
}
for k, prop := range ps.Prop {
ixmlPs.Prop[k] = ixmlProperty{
XMLName: ixml.Name(prop.XMLName),
xmlPs.Prop[k] = xmlProperty{
XMLName: xml.Name(prop.XMLName),
Lang: prop.Lang,
InnerXML: prop.InnerXML,
}
}
for k, prop := range ixmlPs.Prop {
for k, prop := range xmlPs.Prop {
if prop.XMLName.Space == "DAV:" {
prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local}
ixmlPs.Prop[k] = prop
prop.XMLName = xml.Name{Space: "", Local: "D:" + prop.XMLName.Local}
xmlPs.Prop[k] = prop
}
}
// Distinct type to avoid infinite recursion of MarshalXML.
type newpropstat ixmlPropstat
return e.EncodeElement(newpropstat(ixmlPs), start)
type newpropstat xmlPropstat
return e.EncodeElement(newpropstat(xmlPs), start)
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response
// See multistatusWriter for the "D:" namespace prefix.
type response struct {
XMLName ixml.Name `xml:"D:response"`
XMLName xml.Name `xml:"D:response"`
Href []string `xml:"D:href"`
Propstat []propstat `xml:"D:propstat"`
Status string `xml:"D:status,omitempty"`
@ -314,7 +239,7 @@ type multistatusWriter struct {
responseDescription string
w http.ResponseWriter
enc *ixml.Encoder
enc *xml.Encoder
}
// Write validates and emits a DAV response as part of a multistatus response
@ -353,19 +278,19 @@ func (w *multistatusWriter) writeHeader() error {
return nil
}
w.w.Header().Add("Content-Type", "text/xml; charset=utf-8")
w.w.WriteHeader(StatusMulti)
w.w.WriteHeader(http.StatusMultiStatus)
_, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`)
if err != nil {
return err
}
w.enc = ixml.NewEncoder(w.w)
return w.enc.EncodeToken(ixml.StartElement{
Name: ixml.Name{
w.enc = xml.NewEncoder(w.w)
return w.enc.EncodeToken(xml.StartElement{
Name: xml.Name{
Space: "DAV:",
Local: "multistatus",
},
Attr: []ixml.Attr{{
Name: ixml.Name{Space: "xmlns", Local: "D"},
Attr: []xml.Attr{{
Name: xml.Name{Space: "xmlns", Local: "D"},
Value: "DAV:",
}},
})
@ -379,17 +304,17 @@ func (w *multistatusWriter) close() error {
if w.enc == nil {
return nil
}
var end []ixml.Token
var end []xml.Token
if w.responseDescription != "" {
name := ixml.Name{Space: "DAV:", Local: "responsedescription"}
name := xml.Name{Space: "DAV:", Local: "responsedescription"}
end = append(end,
ixml.StartElement{Name: name},
ixml.CharData(w.responseDescription),
ixml.EndElement{Name: name},
xml.StartElement{Name: name},
xml.CharData(w.responseDescription),
xml.EndElement{Name: name},
)
}
end = append(end, ixml.EndElement{
Name: ixml.Name{Space: "DAV:", Local: "multistatus"},
end = append(end, xml.EndElement{
Name: xml.Name{Space: "DAV:", Local: "multistatus"},
})
for _, t := range end {
err := w.enc.EncodeToken(t)
@ -400,9 +325,9 @@ func (w *multistatusWriter) close() error {
return w.enc.Flush()
}
var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"}
var xmlLangName = xml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"}
func xmlLang(s ixml.StartElement, d string) string {
func xmlLang(s xml.StartElement, d string) string {
for _, attr := range s.Attr {
if attr.Name == xmlLangName {
return attr.Value
@ -413,19 +338,19 @@ func xmlLang(s ixml.StartElement, d string) string {
type xmlValue []byte
func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
func (v *xmlValue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// The XML value of a property can be arbitrary, mixed-content XML.
// To make sure that the unmarshalled value contains all required
// namespaces, we encode all the property value XML tokens into a
// buffer. This forces the encoder to redeclare any used namespaces.
var b bytes.Buffer
e := ixml.NewEncoder(&b)
e := xml.NewEncoder(&b)
for {
t, err := next(d)
if err != nil {
return err
}
if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name {
if e, ok := t.(xml.EndElement); ok && e.Name == start.Name {
break
}
if err = e.EncodeToken(t); err != nil {
@ -439,81 +364,3 @@ func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error
*v = b.Bytes()
return nil
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch)
type proppatchProps []Property
// UnmarshalXML appends the property names and values enclosed within start
// to ps.
//
// An xml:lang attribute that is defined either on the DAV:prop or property
// name XML element is propagated to the property's Lang field.
//
// UnmarshalXML returns an error if start does not contain any properties or if
// property values contain syntactically incorrect XML.
func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
lang := xmlLang(start, "")
for {
t, err := next(d)
if err != nil {
return err
}
switch elem := t.(type) {
case ixml.EndElement:
if len(*ps) == 0 {
return fmt.Errorf("%s must not be empty", start.Name.Local)
}
return nil
case ixml.StartElement:
p := Property{
XMLName: xml.Name(t.(ixml.StartElement).Name),
Lang: xmlLang(t.(ixml.StartElement), lang),
}
err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem)
if err != nil {
return err
}
*ps = append(*ps, p)
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove
type setRemove struct {
XMLName ixml.Name
Lang string `xml:"xml:lang,attr,omitempty"`
Prop proppatchProps `xml:"DAV: prop"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate
type propertyupdate struct {
XMLName ixml.Name `xml:"DAV: propertyupdate"`
Lang string `xml:"xml:lang,attr,omitempty"`
SetRemove []setRemove `xml:",any"`
}
func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) {
var pu propertyupdate
if err = ixml.NewDecoder(r).Decode(&pu); err != nil {
return nil, http.StatusBadRequest, err
}
for _, op := range pu.SetRemove {
remove := false
switch op.XMLName {
case ixml.Name{Space: "DAV:", Local: "set"}:
// No-op.
case ixml.Name{Space: "DAV:", Local: "remove"}:
for _, p := range op.Prop {
if len(p.InnerXML) > 0 {
return nil, http.StatusBadRequest, errInvalidProppatch
}
}
remove = true
default:
return nil, http.StatusBadRequest, errInvalidProppatch
}
patches = append(patches, Proppatch{Remove: remove, Props: op.Prop})
}
return patches, 0, nil
}