Merge pull request #2284 from fjl/accounts-addr-cache

accounts: cache key addresses
Péter Szilágyi 2016-04-12 17:51:09 +03:00
commit 1e9b504ee7
96 changed files with 7364 additions and 1231 deletions

Godeps/Godeps.json generated

@ -9,6 +9,10 @@
"ImportPath": "github.com/Gustav-Simonsson/go-opencl/cl",
"Rev": "593e01cfc4f3353585015321e01951d4a907d3ef"
},
{
"ImportPath": "github.com/cespare/cp",
"Rev": "165db2f241fd235aec29ba6d9b1ccd5f1c14637c"
},
{
"ImportPath": "github.com/codegangsta/cli",
"Comment": "1.2.0-215-g0ab42fd",
@ -115,6 +119,10 @@
"ImportPath": "github.com/rcrowley/go-metrics",
"Rev": "51425a2415d21afadfd55cd93432c0bc69e9598d"
},
{
"ImportPath": "github.com/rjeczalik/notify",
"Rev": "5dd6205716539662f8f14ab513552b41eab69d5d"
},
{
"ImportPath": "github.com/robertkrimen/otto",
"Rev": "53221230c215611a90762720c9042ac782ef74ee"


@ -0,0 +1,19 @@
Copyright (c) 2015 Caleb Spare
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@ -0,0 +1,9 @@
# cp
[![GoDoc](https://godoc.org/github.com/cespare/cp?status.svg)](https://godoc.org/github.com/cespare/cp)
cp is a small Go package for copying files and directories.
The API may change because I want to add some options in the future (for merging with existing dirs).
It does not currently handle Windows specifically (I think it may require some special treatment).

Godeps/_workspace/src/github.com/cespare/cp/cp.go generated vendored Normal file

@ -0,0 +1,58 @@
// Package cp offers simple file and directory copying for Go.
package cp
import (
"errors"
"io"
"os"
"path/filepath"
"strings"
)
var errCopyFileWithDir = errors.New("dir argument to CopyFile")
// CopyFile copies the file with path src to dst. The new file must not exist.
// It is created with the same permissions as src.
func CopyFile(dst, src string) error {
rf, err := os.Open(src)
if err != nil {
return err
}
defer rf.Close()
rstat, err := rf.Stat()
if err != nil {
return err
}
if rstat.IsDir() {
return errCopyFileWithDir
}
wf, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_EXCL, rstat.Mode())
if err != nil {
return err
}
if _, err := io.Copy(wf, rf); err != nil {
wf.Close()
return err
}
return wf.Close()
}
// CopyAll copies the file or (recursively) the directory at src to dst.
// Permissions are preserved. dst must not already exist.
func CopyAll(dst, src string) error {
return filepath.Walk(src, makeWalkFn(dst, src))
}
func makeWalkFn(dst, src string) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
dstPath := filepath.Join(dst, strings.TrimPrefix(path, src))
if info.IsDir() {
return os.Mkdir(dstPath, info.Mode())
}
return CopyFile(dstPath, path)
}
}
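
For context, a minimal usage sketch of the two exported functions above; the paths are purely illustrative:

```go
package main

import (
	"log"

	"github.com/cespare/cp"
)

func main() {
	// Copy a single file; the destination must not exist yet and is
	// created with the same permissions as the source.
	if err := cp.CopyFile("/tmp/dst.txt", "/tmp/src.txt"); err != nil {
		log.Fatal(err)
	}
	// Recursively copy a directory tree, preserving permissions.
	if err := cp.CopyAll("/tmp/dstdir", "/tmp/srcdir"); err != nil {
		log.Fatal(err)
	}
}
```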


@ -0,0 +1,88 @@
# Created by https://www.gitignore.io
### OSX ###
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear on external disk
.Spotlight-V100
.Trashes
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### Windows ###
# Windows image file caches
Thumbs.db
ehthumbs.db
# Folder config file
Desktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msm
*.msp
# Windows shortcuts
*.lnk
### Linux ###
*~
# KDE directory preferences
.directory
### Go ###
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
### vim ###
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist
*~


@ -0,0 +1,30 @@
language: go
go:
- 1.4.3
- 1.6
os:
- linux
- osx
matrix:
include:
- os: osx
go: 1.6
env:
- GOFLAGS="-tags kqueue"
env:
global:
- GOBIN=$HOME/bin
- PATH=$HOME/bin:$PATH
install:
- go get golang.org/x/tools/cmd/vet
- go get -t -v ./...
script:
- go tool vet -all .
- go install $GOFLAGS ./...
- go test -v -race $GOFLAGS ./...


@ -0,0 +1,10 @@
# List of individuals who contributed to the Notify package.
#
# The up-to-date list of authors may be obtained with:
#
# ~ $ git shortlog -es | cut -f2 | rev | uniq -f1 | rev
#
Pawel Blaszczyk <blaszczykpb@gmail.com>
Pawel Knap <pawelknap88@gmail.com>
Rafal Jeczalik <rjeczalik@gmail.com>


@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014-2015 The Notify Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -0,0 +1,21 @@
notify [![GoDoc](https://godoc.org/github.com/rjeczalik/notify?status.svg)](https://godoc.org/github.com/rjeczalik/notify) [![Build Status](https://img.shields.io/travis/rjeczalik/notify/master.svg)](https://travis-ci.org/rjeczalik/notify "inotify + FSEvents + kqueue") [![Build status](https://img.shields.io/appveyor/ci/rjeczalik/notify-246.svg)](https://ci.appveyor.com/project/rjeczalik/notify-246 "ReadDirectoryChangesW") [![Coverage Status](https://img.shields.io/coveralls/rjeczalik/notify/master.svg)](https://coveralls.io/r/rjeczalik/notify?branch=master)
======
Filesystem event notification library on steroids. (under active development)
*Documentation*
[godoc.org/github.com/rjeczalik/notify](https://godoc.org/github.com/rjeczalik/notify)
*Installation*
```
~ $ go get -u github.com/rjeczalik/notify
```
*Projects using notify*
- [github.com/rjeczalik/cmd/notify](https://godoc.org/github.com/rjeczalik/cmd/notify)
- [github.com/cortesi/devd](https://github.com/cortesi/devd)
- [github.com/cortesi/modd](https://github.com/cortesi/modd)
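
A minimal usage sketch of the Watch/Stop API vendored below; the watched path and buffer size are illustrative:

```go
package main

import (
	"log"

	"github.com/rjeczalik/notify"
)

func main() {
	// Use a buffered channel: Watch never blocks while delivering events.
	c := make(chan notify.EventInfo, 4)

	// The "..." suffix requests a recursive watchpoint.
	if err := notify.Watch("./...", c, notify.All); err != nil {
		log.Fatal(err)
	}
	defer notify.Stop(c)

	ei := <-c
	log.Println("got event:", ei)
}
```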


@ -0,0 +1,24 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\projects\src\github.com\rjeczalik\notify
environment:
PATH: c:\projects\bin;%PATH%
GOPATH: c:\projects
NOTIFY_TIMEOUT: 5s
install:
- go version
- go get golang.org/x/tools/cmd/vet
- go get -v -t ./...
build_script:
- go tool vet -all .
- go build ./...
- go test -v -race ./...
test: off
deploy: off


@ -0,0 +1,11 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build !debug
package notify
func dbgprint(...interface{}) {}
func dbgprintf(string, ...interface{}) {}


@ -0,0 +1,43 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build debug
package notify
import (
"fmt"
"os"
"runtime"
"strings"
)
func dbgprint(v ...interface{}) {
fmt.Printf("[D] ")
fmt.Print(v...)
fmt.Printf("\n\n")
}
func dbgprintf(format string, v ...interface{}) {
fmt.Printf("[D] ")
fmt.Printf(format, v...)
fmt.Printf("\n\n")
}
func dbgcallstack(max int) []string {
pc, stack := make([]uintptr, max), make([]string, 0, max)
runtime.Callers(2, pc)
for _, pc := range pc {
if f := runtime.FuncForPC(pc); f != nil {
fname := f.Name()
idx := strings.LastIndex(fname, string(os.PathSeparator))
if idx != -1 {
stack = append(stack, fname[idx+1:])
} else {
stack = append(stack, fname)
}
}
}
return stack
}
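
The two files above are selected via the `debug` build tag: the no-op stubs compile by default, while the printing variants are compiled in when building or testing with `-tags debug` (e.g. `go test -tags debug ./...`).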


@ -0,0 +1,40 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// Package notify implements access to filesystem events.
//
// Notify is a high-level abstraction over filesystem watchers like inotify,
// kqueue, FSEvents, FEN or ReadDirectoryChangesW. Watcher implementations are
// split into two groups: ones that natively support recursive notifications
// (FSEvents and ReadDirectoryChangesW) and ones that do not (inotify, kqueue, FEN).
// For more details see watcher and recursiveWatcher interfaces in watcher.go
// source file.
//
// On top of filesystem watchers notify maintains a watchpoint tree, which provides
// strategy for creating and closing filesystem watches and dispatching filesystem
// events to user channels.
//
// An event set is just a list of events joined into a single event value
// using the bitwise OR operator.
//
// A filesystem watch, or just a watch, is a platform-specific entity which represents
// a single path registered for notifications for a specific event set. Setting a watch
// means using platform-specific API calls to create / initialize said watch.
// For each watcher the API call is:
//
// - FSEvents: FSEventStreamCreate
// - inotify: inotify_add_watch
// - kqueue: kevent
// - ReadDirectoryChangesW: CreateFile+ReadDirectoryChangesW
// - FEN: port_get
//
// To rewatch means to either shrink or expand an event set that was previously
// registered during a watch operation for a particular filesystem watch.
//
// A watchpoint is a list of user channel and event set pairs for a particular
// path (a watchpoint tree's node). A single watchpoint can contain multiple
// different user channels registered to listen for one or more events. A single
// user channel can be registered in one or more watchpoints, recursive and
// non-recursive ones alike.
package notify
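
To make the event-set terminology above concrete, here is a hedged sketch of composing platform-independent events with bitwise OR and handing them to Watch; the channel and path are illustrative and `log` is assumed to be imported:

```go
// Create|Rename is an event set: a single Event value carrying two events.
events := notify.Create | notify.Rename

c := make(chan notify.EventInfo, 1)
if err := notify.Watch("/tmp/watched", c, events); err != nil {
	log.Fatal(err)
}
```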


@ -0,0 +1,143 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
package notify
import (
"fmt"
"strings"
)
// Event represents the type of filesystem action.
//
// The number of available event values depends on the target system or the
// watcher implementation used (e.g. it's possible to use either kqueue or
// FSEvents on Darwin).
//
// Please consult documentation for your target platform to see list of all
// available events.
type Event uint32
// Create, Remove, Write and Rename are the only event values guaranteed to be
// present on all platforms.
const (
Create = osSpecificCreate
Remove = osSpecificRemove
Write = osSpecificWrite
Rename = osSpecificRename
// All is a handy alias for all platform-independent event values.
All = Create | Remove | Write | Rename
)
const internal = recursive | omit
// String implements fmt.Stringer interface.
func (e Event) String() string {
var s []string
for _, strmap := range []map[Event]string{estr, osestr} {
for ev, str := range strmap {
if e&ev == ev {
s = append(s, str)
}
}
}
return strings.Join(s, "|")
}
// EventInfo describes an event reported by the underlying filesystem notification
// subsystem.
//
// It always describes a single event, even if the OS reported a coalesced action.
// The reported path is absolute and clean.
//
// For non-recursive watchpoints its base is always equal to the path passed
// to the corresponding Watch call.
//
// The value of Sys is system-dependent and can be nil.
//
// Sys
//
// Under Darwin (FSEvents) Sys() always returns a non-nil *notify.FSEvent value,
// which is defined as:
//
// type FSEvent struct {
// Path string // real path of the file or directory
// ID uint64 // ID of the event (FSEventStreamEventId)
// Flags uint32 // joint FSEvents* flags (FSEventStreamEventFlags)
// }
//
// For possible values of Flags see Darwin godoc for notify or FSEvents
// documentation for FSEventStreamEventFlags constants:
//
// https://developer.apple.com/library/mac/documentation/Darwin/Reference/FSEvents_Ref/index.html#//apple_ref/doc/constant_group/FSEventStreamEventFlags
//
// Under Linux (inotify) Sys() always returns a non-nil *syscall.InotifyEvent
// value, defined as:
//
// type InotifyEvent struct {
// Wd int32 // Watch descriptor
// Mask uint32 // Mask describing event
// Cookie uint32 // Unique cookie associating related events (for rename(2))
// Len uint32 // Size of name field
// Name [0]uint8 // Optional null-terminated name
// }
//
// More information about inotify masks and the usage of inotify_event structure
// can be found at:
//
// http://man7.org/linux/man-pages/man7/inotify.7.html
//
// Under Darwin, DragonFlyBSD, FreeBSD, NetBSD, OpenBSD (kqueue) Sys() always
// returns a non-nil *notify.Kevent value, which is defined as:
//
// type Kevent struct {
// Kevent *syscall.Kevent_t // Kevent is a kqueue specific structure
// FI os.FileInfo // FI describes file/dir
// }
//
// More information about syscall.Kevent_t can be found at:
//
// https://www.freebsd.org/cgi/man.cgi?query=kqueue
//
// Under Windows (ReadDirectoryChangesW) Sys() always returns nil. The documentation
// of watcher's WinAPI function can be found at:
//
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365465%28v=vs.85%29.aspx
type EventInfo interface {
Event() Event // event value for the filesystem action
Path() string // real path of the file or directory
Sys() interface{} // underlying data source (can return nil)
}
type isDirer interface {
isDir() (bool, error)
}
var _ fmt.Stringer = (*event)(nil)
var _ isDirer = (*event)(nil)
// String implements fmt.Stringer interface.
func (e *event) String() string {
return e.Event().String() + `: "` + e.Path() + `"`
}
var estr = map[Event]string{
Create: "notify.Create",
Remove: "notify.Remove",
Write: "notify.Write",
Rename: "notify.Rename",
// Display name for recursive event is added only for debugging
// purposes. It's an internal event after all and won't be exposed to the
// user. Having Recursive event printable is helpful, e.g. for reading
// testing failure messages:
//
// --- FAIL: TestWatchpoint (0.00 seconds)
// watchpoint_test.go:64: want diff=[notify.Remove notify.Create|notify.Remove];
// got [notify.Remove notify.Remove|notify.Create] (i=1)
//
// Yup, here the diff has the recursive event inside. Go figure.
recursive: "recursive",
omit: "omit",
}
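
As documented above, the value returned by Sys is platform-dependent. A hedged sketch of inspecting it on Linux, where Sys returns a *syscall.InotifyEvent (assumes `c` was registered via notify.Watch and the `log` and `syscall` packages are imported):

```go
ei := <-c

// On Linux the underlying data source is the raw inotify event.
if sys, ok := ei.Sys().(*syscall.InotifyEvent); ok {
	log.Printf("wd=%d mask=%#x cookie=%d", sys.Wd, sys.Mask, sys.Cookie)
}
```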


@ -0,0 +1,46 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build solaris
package notify
const (
osSpecificCreate Event = 0x00000100 << iota
osSpecificRemove
osSpecificWrite
osSpecificRename
// internal
// recursive is used to distinguish recursive eventsets from non-recursive ones
recursive
// omit is used for dispatching internal events; only those events are sent
// for which both the event and the watchpoint have omit in their event sets.
omit
)
const (
FileAccess = fileAccess
FileModified = fileModified
FileAttrib = fileAttrib
FileDelete = fileDelete
FileRenameTo = fileRenameTo
FileRenameFrom = fileRenameFrom
FileTrunc = fileTrunc
FileNoFollow = fileNoFollow
Unmounted = unmounted
MountedOver = mountedOver
)
var osestr = map[Event]string{
FileAccess: "notify.FileAccess",
FileModified: "notify.FileModified",
FileAttrib: "notify.FileAttrib",
FileDelete: "notify.FileDelete",
FileRenameTo: "notify.FileRenameTo",
FileRenameFrom: "notify.FileRenameFrom",
FileTrunc: "notify.FileTrunc",
FileNoFollow: "notify.FileNoFollow",
Unmounted: "notify.Unmounted",
MountedOver: "notify.MountedOver",
}


@ -0,0 +1,71 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build darwin,!kqueue
package notify
const (
osSpecificCreate = Event(FSEventsCreated)
osSpecificRemove = Event(FSEventsRemoved)
osSpecificWrite = Event(FSEventsModified)
osSpecificRename = Event(FSEventsRenamed)
// internal = Event(0x100000)
// recursive is used to distinguish recursive eventsets from non-recursive ones
recursive = Event(0x200000)
// omit is used for dispatching internal events; only those events are sent
// for which both the event and the watchpoint have omit in their event sets.
omit = Event(0x400000)
)
// FSEvents specific event values.
const (
FSEventsMustScanSubDirs Event = 0x00001
FSEventsUserDropped = 0x00002
FSEventsKernelDropped = 0x00004
FSEventsEventIdsWrapped = 0x00008
FSEventsHistoryDone = 0x00010
FSEventsRootChanged = 0x00020
FSEventsMount = 0x00040
FSEventsUnmount = 0x00080
FSEventsCreated = 0x00100
FSEventsRemoved = 0x00200
FSEventsInodeMetaMod = 0x00400
FSEventsRenamed = 0x00800
FSEventsModified = 0x01000
FSEventsFinderInfoMod = 0x02000
FSEventsChangeOwner = 0x04000
FSEventsXattrMod = 0x08000
FSEventsIsFile = 0x10000
FSEventsIsDir = 0x20000
FSEventsIsSymlink = 0x40000
)
var osestr = map[Event]string{
FSEventsMustScanSubDirs: "notify.FSEventsMustScanSubDirs",
FSEventsUserDropped: "notify.FSEventsUserDropped",
FSEventsKernelDropped: "notify.FSEventsKernelDropped",
FSEventsEventIdsWrapped: "notify.FSEventsEventIdsWrapped",
FSEventsHistoryDone: "notify.FSEventsHistoryDone",
FSEventsRootChanged: "notify.FSEventsRootChanged",
FSEventsMount: "notify.FSEventsMount",
FSEventsUnmount: "notify.FSEventsUnmount",
FSEventsInodeMetaMod: "notify.FSEventsInodeMetaMod",
FSEventsFinderInfoMod: "notify.FSEventsFinderInfoMod",
FSEventsChangeOwner: "notify.FSEventsChangeOwner",
FSEventsXattrMod: "notify.FSEventsXattrMod",
FSEventsIsFile: "notify.FSEventsIsFile",
FSEventsIsDir: "notify.FSEventsIsDir",
FSEventsIsSymlink: "notify.FSEventsIsSymlink",
}
type event struct {
fse FSEvent
event Event
}
func (ei *event) Event() Event { return ei.event }
func (ei *event) Path() string { return ei.fse.Path }
func (ei *event) Sys() interface{} { return &ei.fse }
func (ei *event) isDir() (bool, error) { return ei.fse.Flags&FSEventsIsDir != 0, nil }


@ -0,0 +1,75 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build linux
package notify
import "syscall"
// Platform independent event values.
const (
osSpecificCreate Event = 0x100000 << iota
osSpecificRemove
osSpecificWrite
osSpecificRename
// internal
// recursive is used to distinguish recursive eventsets from non-recursive ones
recursive
// omit is used for dispatching internal events; only those events are sent
// for which both the event and the watchpoint have omit in their event sets.
omit
)
// Inotify-specific masks are legal, implemented events that are guaranteed to
// work with the notify package on Linux-based systems.
const (
InAccess = Event(syscall.IN_ACCESS) // File was accessed
InModify = Event(syscall.IN_MODIFY) // File was modified
InAttrib = Event(syscall.IN_ATTRIB) // Metadata changed
InCloseWrite = Event(syscall.IN_CLOSE_WRITE) // Writable file was closed
InCloseNowrite = Event(syscall.IN_CLOSE_NOWRITE) // Unwritable file closed
InOpen = Event(syscall.IN_OPEN) // File was opened
InMovedFrom = Event(syscall.IN_MOVED_FROM) // File was moved from X
InMovedTo = Event(syscall.IN_MOVED_TO) // File was moved to Y
InCreate = Event(syscall.IN_CREATE) // Subfile was created
InDelete = Event(syscall.IN_DELETE) // Subfile was deleted
InDeleteSelf = Event(syscall.IN_DELETE_SELF) // Self was deleted
InMoveSelf = Event(syscall.IN_MOVE_SELF) // Self was moved
)
var osestr = map[Event]string{
InAccess: "notify.InAccess",
InModify: "notify.InModify",
InAttrib: "notify.InAttrib",
InCloseWrite: "notify.InCloseWrite",
InCloseNowrite: "notify.InCloseNowrite",
InOpen: "notify.InOpen",
InMovedFrom: "notify.InMovedFrom",
InMovedTo: "notify.InMovedTo",
InCreate: "notify.InCreate",
InDelete: "notify.InDelete",
InDeleteSelf: "notify.InDeleteSelf",
InMoveSelf: "notify.InMoveSelf",
}
// Inotify behavior events are not **currently** supported by notify package.
const (
inDontFollow = Event(syscall.IN_DONT_FOLLOW)
inExclUnlink = Event(syscall.IN_EXCL_UNLINK)
inMaskAdd = Event(syscall.IN_MASK_ADD)
inOneshot = Event(syscall.IN_ONESHOT)
inOnlydir = Event(syscall.IN_ONLYDIR)
)
type event struct {
sys syscall.InotifyEvent
path string
event Event
}
func (e *event) Event() Event { return e.event }
func (e *event) Path() string { return e.path }
func (e *event) Sys() interface{} { return &e.sys }
func (e *event) isDir() (bool, error) { return e.sys.Mask&syscall.IN_ISDIR != 0, nil }
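
A hedged sketch of passing the Linux-specific event values above directly to Watch; it is only meaningful when building for Linux, and the path is illustrative:

```go
c := make(chan notify.EventInfo, 1)

// InCloseWrite fires when a file opened for writing is closed, which is
// often a better "file finished changing" signal than a plain Write.
if err := notify.Watch("/var/log/app", c, notify.InCloseWrite, notify.InMovedTo); err != nil {
	log.Fatal(err)
}
```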


@ -0,0 +1,59 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build darwin,kqueue dragonfly freebsd netbsd openbsd
package notify
import "syscall"
// TODO(pblaszczyk): ensure at runtime that notify's built-in event values do not
// overlap with platform-defined ones.
// Platform independent event values.
const (
osSpecificCreate Event = 0x0100 << iota
osSpecificRemove
osSpecificWrite
osSpecificRename
// internal
// recursive is used to distinguish recursive eventsets from non-recursive ones
recursive
// omit is used for dispatching internal events; only those events are sent
// for which both the event and the watchpoint have omit in their event sets.
omit
)
const (
// NoteDelete is an event reported when the unlink() system call was called
// on the file referenced by the descriptor.
NoteDelete = Event(syscall.NOTE_DELETE)
// NoteWrite is an event reported when a write occurred on the file
// referenced by the descriptor.
NoteWrite = Event(syscall.NOTE_WRITE)
// NoteExtend is an event reported when the file referenced by the
// descriptor was extended.
NoteExtend = Event(syscall.NOTE_EXTEND)
// NoteAttrib is an event reported when the file referenced
// by the descriptor had its attributes changed.
NoteAttrib = Event(syscall.NOTE_ATTRIB)
// NoteLink is an event reported when the link count on the file changed.
NoteLink = Event(syscall.NOTE_LINK)
// NoteRename is an event reported when the file referenced
// by the descriptor was renamed.
NoteRename = Event(syscall.NOTE_RENAME)
// NoteRevoke is an event reported when access to the file was revoked via
// revoke(2) or the underlying file system was unmounted.
NoteRevoke = Event(syscall.NOTE_REVOKE)
)
var osestr = map[Event]string{
NoteDelete: "notify.NoteDelete",
NoteWrite: "notify.NoteWrite",
NoteExtend: "notify.NoteExtend",
NoteAttrib: "notify.NoteAttrib",
NoteLink: "notify.NoteLink",
NoteRename: "notify.NoteRename",
NoteRevoke: "notify.NoteRevoke",
}


@ -0,0 +1,108 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build windows
package notify
import (
"os"
"path/filepath"
"syscall"
)
// Platform independent event values.
const (
osSpecificCreate Event = 1 << (20 + iota)
osSpecificRemove
osSpecificWrite
osSpecificRename
// recursive is used to distinguish recursive eventsets from non-recursive ones
recursive
// omit is used for dispatching internal events; only those events are sent
// for which both the event and the watchpoint have omit in their event sets.
omit
// dirmarker TODO(pknap)
dirmarker
)
// ReadDirectoryChangesW filters.
const (
FileNotifyChangeFileName = Event(syscall.FILE_NOTIFY_CHANGE_FILE_NAME)
FileNotifyChangeDirName = Event(syscall.FILE_NOTIFY_CHANGE_DIR_NAME)
FileNotifyChangeAttributes = Event(syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES)
FileNotifyChangeSize = Event(syscall.FILE_NOTIFY_CHANGE_SIZE)
FileNotifyChangeLastWrite = Event(syscall.FILE_NOTIFY_CHANGE_LAST_WRITE)
FileNotifyChangeLastAccess = Event(syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS)
FileNotifyChangeCreation = Event(syscall.FILE_NOTIFY_CHANGE_CREATION)
FileNotifyChangeSecurity = Event(syscallFileNotifyChangeSecurity)
)
const (
fileNotifyChangeAll = 0x17f // logical sum of all FileNotifyChange* events.
fileNotifyChangeModified = fileNotifyChangeAll &^ (FileNotifyChangeFileName | FileNotifyChangeDirName)
)
// according to: http://msdn.microsoft.com/en-us/library/windows/desktop/aa365465(v=vs.85).aspx
// this flag should be declared in: http://golang.org/src/pkg/syscall/ztypes_windows.go
const syscallFileNotifyChangeSecurity = 0x00000100
// ReadDirectoryChangesW actions.
const (
FileActionAdded = Event(syscall.FILE_ACTION_ADDED) << 12
FileActionRemoved = Event(syscall.FILE_ACTION_REMOVED) << 12
FileActionModified = Event(syscall.FILE_ACTION_MODIFIED) << 14
FileActionRenamedOldName = Event(syscall.FILE_ACTION_RENAMED_OLD_NAME) << 15
FileActionRenamedNewName = Event(syscall.FILE_ACTION_RENAMED_NEW_NAME) << 16
)
const fileActionAll = 0x7f000 // logical sum of all FileAction* events.
var osestr = map[Event]string{
FileNotifyChangeFileName: "notify.FileNotifyChangeFileName",
FileNotifyChangeDirName: "notify.FileNotifyChangeDirName",
FileNotifyChangeAttributes: "notify.FileNotifyChangeAttributes",
FileNotifyChangeSize: "notify.FileNotifyChangeSize",
FileNotifyChangeLastWrite: "notify.FileNotifyChangeLastWrite",
FileNotifyChangeLastAccess: "notify.FileNotifyChangeLastAccess",
FileNotifyChangeCreation: "notify.FileNotifyChangeCreation",
FileNotifyChangeSecurity: "notify.FileNotifyChangeSecurity",
FileActionAdded: "notify.FileActionAdded",
FileActionRemoved: "notify.FileActionRemoved",
FileActionModified: "notify.FileActionModified",
FileActionRenamedOldName: "notify.FileActionRenamedOldName",
FileActionRenamedNewName: "notify.FileActionRenamedNewName",
}
const (
fTypeUnknown uint8 = iota
fTypeFile
fTypeDirectory
)
// TODO(ppknap) : doc.
type event struct {
pathw []uint16
name string
ftype uint8
action uint32
filter uint32
e Event
}
func (e *event) Event() Event { return e.e }
func (e *event) Path() string { return filepath.Join(syscall.UTF16ToString(e.pathw), e.name) }
func (e *event) Sys() interface{} { return e.ftype }
func (e *event) isDir() (bool, error) {
if e.ftype != fTypeUnknown {
return e.ftype == fTypeDirectory, nil
}
fi, err := os.Stat(e.Path())
if err != nil {
return false, err
}
return fi.IsDir(), nil
}
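
Similarly, a hedged sketch for the Windows-specific action events defined above; it is only meaningful when building for Windows, and the path is illustrative:

```go
c := make(chan notify.EventInfo, 1)

// FileActionAdded and FileActionRemoved map ReadDirectoryChangesW
// actions onto notify events.
if err := notify.Watch(`C:\projects\data`, c, notify.FileActionAdded, notify.FileActionRemoved); err != nil {
	log.Fatal(err)
}
```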


@ -0,0 +1,31 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build !darwin,!linux,!freebsd,!dragonfly,!netbsd,!openbsd,!windows
// +build !kqueue,!solaris
package notify
// Platform independent event values.
const (
osSpecificCreate Event = 1 << iota
osSpecificRemove
osSpecificWrite
osSpecificRename
// internal
// recursive is used to distinguish recursive eventsets from non-recursive ones
recursive
// omit is used for dispatching internal events; only those events are sent
// for which both the event and the watchpoint have omit in their event sets.
omit
)
var osestr = map[Event]string{}
type event struct{}
func (e *event) Event() (_ Event) { return }
func (e *event) Path() (_ string) { return }
func (e *event) Sys() (_ interface{}) { return }
func (e *event) isDir() (_ bool, _ error) { return }


@ -0,0 +1,22 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build darwin,kqueue dragonfly freebsd netbsd openbsd solaris
package notify
type event struct {
p string
e Event
d bool
pe interface{}
}
func (e *event) Event() Event { return e.e }
func (e *event) Path() string { return e.p }
func (e *event) Sys() interface{} { return e.pe }
func (e *event) isDir() (bool, error) { return e.d, nil }


@ -0,0 +1,271 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
package notify
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
)
var errSkip = errors.New("notify: skip")
type walkPathFunc func(nd node, isbase bool) error
type walkFunc func(node) error
func errnotexist(name string) error {
return &os.PathError{
Op: "Node",
Path: name,
Err: os.ErrNotExist,
}
}
type node struct {
Name string
Watch watchpoint
Child map[string]node
}
func newnode(name string) node {
return node{
Name: name,
Watch: make(watchpoint),
Child: make(map[string]node),
}
}
func (nd node) addchild(name, base string) node {
child, ok := nd.Child[base]
if !ok {
child = newnode(name)
nd.Child[base] = child
}
return child
}
func (nd node) Add(name string) node {
i := indexbase(nd.Name, name)
if i == -1 {
return node{}
}
for j := indexSep(name[i:]); j != -1; j = indexSep(name[i:]) {
nd = nd.addchild(name[:i+j], name[i:i+j])
i += j + 1
}
return nd.addchild(name, name[i:])
}
func (nd node) AddDir(fn walkFunc) error {
stack := []node{nd}
Traverse:
for n := len(stack); n != 0; n = len(stack) {
nd, stack = stack[n-1], stack[:n-1]
switch err := fn(nd); err {
case nil:
case errSkip:
continue Traverse
default:
return err
}
// TODO(rjeczalik): tolerate open failures - add failed names to
// AddDirError and notify users which names are not added to the tree.
fi, err := ioutil.ReadDir(nd.Name)
if err != nil {
return err
}
for _, fi := range fi {
if fi.Mode()&(os.ModeSymlink|os.ModeDir) == os.ModeDir {
name := filepath.Join(nd.Name, fi.Name())
stack = append(stack, nd.addchild(name, name[len(nd.Name)+1:]))
}
}
}
return nil
}
func (nd node) Get(name string) (node, error) {
i := indexbase(nd.Name, name)
if i == -1 {
return node{}, errnotexist(name)
}
ok := false
for j := indexSep(name[i:]); j != -1; j = indexSep(name[i:]) {
if nd, ok = nd.Child[name[i:i+j]]; !ok {
return node{}, errnotexist(name)
}
i += j + 1
}
if nd, ok = nd.Child[name[i:]]; !ok {
return node{}, errnotexist(name)
}
return nd, nil
}
func (nd node) Del(name string) error {
i := indexbase(nd.Name, name)
if i == -1 {
return errnotexist(name)
}
stack := []node{nd}
ok := false
for j := indexSep(name[i:]); j != -1; j = indexSep(name[i:]) {
if nd, ok = nd.Child[name[i:i+j]]; !ok {
return errnotexist(name[:i+j])
}
stack = append(stack, nd)
}
if nd, ok = nd.Child[name[i:]]; !ok {
return errnotexist(name)
}
nd.Child = nil
nd.Watch = nil
for name, i = base(nd.Name), len(stack); i != 0; name, i = base(nd.Name), i-1 {
nd = stack[i-1]
if nd := nd.Child[name]; len(nd.Watch) > 1 || len(nd.Child) != 0 {
break
} else {
nd.Child = nil
nd.Watch = nil
}
delete(nd.Child, name)
}
return nil
}
func (nd node) Walk(fn walkFunc) error {
stack := []node{nd}
Traverse:
for n := len(stack); n != 0; n = len(stack) {
nd, stack = stack[n-1], stack[:n-1]
switch err := fn(nd); err {
case nil:
case errSkip:
continue Traverse
default:
return err
}
for name, nd := range nd.Child {
if name == "" {
// A node storing inactive watchpoints has an empty name; skip it
// from traversal. The root node also has an empty name, but it
// never has a parent node.
continue
}
stack = append(stack, nd)
}
}
return nil
}
func (nd node) WalkPath(name string, fn walkPathFunc) error {
i := indexbase(nd.Name, name)
if i == -1 {
return errnotexist(name)
}
ok := false
for j := indexSep(name[i:]); j != -1; j = indexSep(name[i:]) {
switch err := fn(nd, false); err {
case nil:
case errSkip:
return nil
default:
return err
}
if nd, ok = nd.Child[name[i:i+j]]; !ok {
return errnotexist(name[:i+j])
}
i += j + 1
}
switch err := fn(nd, false); err {
case nil:
case errSkip:
return nil
default:
return err
}
if nd, ok = nd.Child[name[i:]]; !ok {
return errnotexist(name)
}
switch err := fn(nd, true); err {
case nil, errSkip:
return nil
default:
return err
}
}
type root struct {
nd node
}
func (r root) addroot(name string) node {
if vol := filepath.VolumeName(name); vol != "" {
root, ok := r.nd.Child[vol]
if !ok {
root = r.nd.addchild(vol, vol)
}
return root
}
return r.nd
}
func (r root) root(name string) (node, error) {
if vol := filepath.VolumeName(name); vol != "" {
nd, ok := r.nd.Child[vol]
if !ok {
return node{}, errnotexist(name)
}
return nd, nil
}
return r.nd, nil
}
func (r root) Add(name string) node {
return r.addroot(name).Add(name)
}
func (r root) AddDir(dir string, fn walkFunc) error {
return r.Add(dir).AddDir(fn)
}
func (r root) Del(name string) error {
nd, err := r.root(name)
if err != nil {
return err
}
return nd.Del(name)
}
func (r root) Get(name string) (node, error) {
nd, err := r.root(name)
if err != nil {
return node{}, err
}
if nd.Name != name {
if nd, err = nd.Get(name); err != nil {
return node{}, err
}
}
return nd, nil
}
func (r root) Walk(name string, fn walkFunc) error {
nd, err := r.Get(name)
if err != nil {
return err
}
return nd.Walk(fn)
}
func (r root) WalkPath(name string, fn walkPathFunc) error {
nd, err := r.root(name)
if err != nil {
return err
}
return nd.WalkPath(name, fn)
}


@ -0,0 +1,74 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// BUG(rjeczalik): Notify does not collect watchpoints when the underlying watches
// were removed by their OS-specific watcher implementations. Instead, users are
// advised to listen on persistent paths to be guaranteed they receive events
// for the whole lifetime of their applications (to discuss see #69).
// BUG(ppknap): Linux (inotify) does not support watcher behavior masks like
// InOneshot, InOnlydir etc. Instead users are advised to perform the filtering
// themselves (to discuss see #71).
// BUG(ppknap): Notify was not tested for short path name support under Windows
// (ReadDirectoryChangesW).
// BUG(ppknap): Windows (ReadDirectoryChangesW) cannot recognize which notification
// triggers FileActionModified event. (to discuss see #75).
package notify
var defaultTree = newTree()
// Watch sets up a watchpoint on path listening for events given by the events
// argument.
//
// The file or directory given by the path must exist, otherwise Watch will fail
// with a non-nil error. Notify resolves, for its internal purposes, any symlinks
// the provided path may contain, so it may fail if the symlinks form a cycle.
// It does so since not all watcher implementations treat passed paths as-is.
// E.g. FSEvents reports a real path for every event, so setting a watchpoint
// on /tmp will report events with paths rooted at /private/tmp etc.
//
// The channel c should almost always be a buffered channel. Watch will not block
// sending to c - the caller must ensure that c has sufficient buffer space to
// keep up with the expected event rate.
//
// It is allowed to pass the same channel multiple times with different event
// lists or different paths. Calling Watch with different event lists for a single
// watchpoint expands its event set. The only way to shrink it is to call
// Stop on its channel.
//
// Calling Watch with an empty event list neither expands nor shrinks the
// watchpoint's event set. If c is the first channel to listen for events on the
// given path, Watch will seamlessly create a watch on the filesystem.
//
// Notify dispatches copies of a single filesystem event to all channels registered
// for each path. If a single filesystem event contains multiple coalesced events,
// each of them is dispatched separately. E.g. the following filesystem change:
//
// ~ $ echo Hello > Notify.txt
//
// dispatches two events - notify.Create and notify.Write. However, it may depend
// on the underlying watcher implementation whether the OS reports both of them.
//
// Windows and recursive watches
//
// If a directory whose path was used to create a recursive watch under Windows
// gets deleted, the OS will not report such an event. It is advised to keep
// this limitation in mind while setting recursive watchpoints for your application,
// e.g. use persistent paths like %userprofile% or additionally watch the parent
// directory of a recursive watchpoint in order to receive delete events for it.
func Watch(path string, c chan<- EventInfo, events ...Event) error {
return defaultTree.Watch(path, c, events...)
}
// Stop removes all watchpoints registered for c. All underlying watches are
// also removed, for which c was the last channel listening for events.
//
// Stop does not close c. When Stop returns, it is guaranteed that c will
// receive no more signals.
func Stop(c chan<- EventInfo) {
defaultTree.Stop(c)
}
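
As the Watch documentation above notes, a single channel may be registered for several paths or event lists, and Stop later removes all of its watchpoints at once. A hedged sketch with illustrative paths:

```go
c := make(chan notify.EventInfo, 8)

// Registering the same channel again expands what it listens for.
if err := notify.Watch("/tmp/a", c, notify.Create); err != nil {
	log.Fatal(err)
}
if err := notify.Watch("/tmp/b/...", c, notify.Write, notify.Remove); err != nil {
	log.Fatal(err)
}

// Stop removes every watchpoint registered for c; it does not close c.
notify.Stop(c)
```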


@ -0,0 +1,22 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
package notify
const buffer = 128
type tree interface {
Watch(string, chan<- EventInfo, ...Event) error
Stop(chan<- EventInfo)
Close() error
}
func newTree() tree {
c := make(chan EventInfo, buffer)
w := newWatcher(c)
if rw, ok := w.(recursiveWatcher); ok {
return newRecursiveTree(rw, c)
}
return newNonrecursiveTree(w, c, make(chan EventInfo, buffer))
}


@ -0,0 +1,292 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
package notify
import "sync"
// nonrecursiveTree TODO(rjeczalik)
type nonrecursiveTree struct {
rw sync.RWMutex // protects root
root root
w watcher
c chan EventInfo
rec chan EventInfo
}
// newNonrecursiveTree TODO(rjeczalik)
func newNonrecursiveTree(w watcher, c, rec chan EventInfo) *nonrecursiveTree {
if rec == nil {
rec = make(chan EventInfo, buffer)
}
t := &nonrecursiveTree{
root: root{nd: newnode("")},
w: w,
c: c,
rec: rec,
}
go t.dispatch(c)
go t.internal(rec)
return t
}
// dispatch TODO(rjeczalik)
func (t *nonrecursiveTree) dispatch(c <-chan EventInfo) {
for ei := range c {
dbgprintf("dispatching %v on %q", ei.Event(), ei.Path())
go func(ei EventInfo) {
var nd node
var isrec bool
dir, base := split(ei.Path())
fn := func(it node, isbase bool) error {
isrec = isrec || it.Watch.IsRecursive()
if isbase {
nd = it
} else {
it.Watch.Dispatch(ei, recursive)
}
return nil
}
t.rw.RLock()
// Notify recursive watchpoints found on the path.
if err := t.root.WalkPath(dir, fn); err != nil {
dbgprint("dispatch did not reach leaf:", err)
t.rw.RUnlock()
return
}
// Notify parent watchpoint.
nd.Watch.Dispatch(ei, 0)
isrec = isrec || nd.Watch.IsRecursive()
// If leaf watchpoint exists, notify it.
if nd, ok := nd.Child[base]; ok {
isrec = isrec || nd.Watch.IsRecursive()
nd.Watch.Dispatch(ei, 0)
}
t.rw.RUnlock()
// If the event describes a leaf directory newly created within a recursive
// watchpoint, forward it so a watch can be set on it as well.
if !isrec || ei.Event() != Create {
return
}
if ok, err := ei.(isDirer).isDir(); !ok || err != nil {
return
}
t.rec <- ei
}(ei)
}
}
// internal TODO(rjeczalik)
func (t *nonrecursiveTree) internal(rec <-chan EventInfo) {
for ei := range rec {
var nd node
var eset = internal
t.rw.Lock()
t.root.WalkPath(ei.Path(), func(it node, _ bool) error {
if e := it.Watch[t.rec]; e != 0 && e > eset {
eset = e
}
nd = it
return nil
})
if eset == internal {
t.rw.Unlock()
continue
}
err := nd.Add(ei.Path()).AddDir(t.recFunc(eset))
t.rw.Unlock()
if err != nil {
dbgprintf("internal(%p) error: %v", rec, err)
}
}
}
// watchAdd TODO(rjeczalik)
func (t *nonrecursiveTree) watchAdd(nd node, c chan<- EventInfo, e Event) eventDiff {
if e&recursive != 0 {
diff := nd.Watch.Add(t.rec, e|Create|omit)
nd.Watch.Add(c, e)
return diff
}
return nd.Watch.Add(c, e)
}
// watchDelMin TODO(rjeczalik)
func (t *nonrecursiveTree) watchDelMin(min Event, nd node, c chan<- EventInfo, e Event) eventDiff {
old, ok := nd.Watch[t.rec]
if ok {
nd.Watch[t.rec] = min
}
diff := nd.Watch.Del(c, e)
if ok {
switch old &^= diff[0] &^ diff[1]; {
case old|internal == internal:
delete(nd.Watch, t.rec)
if set, ok := nd.Watch[nil]; ok && len(nd.Watch) == 1 && set == 0 {
delete(nd.Watch, nil)
}
default:
nd.Watch.Add(t.rec, old|Create)
switch {
case diff == none:
case diff[1]|Create == diff[0]:
diff = none
default:
diff[1] |= Create
}
}
}
return diff
}
// watchDel TODO(rjeczalik)
func (t *nonrecursiveTree) watchDel(nd node, c chan<- EventInfo, e Event) eventDiff {
return t.watchDelMin(0, nd, c, e)
}
// Watch TODO(rjeczalik)
func (t *nonrecursiveTree) Watch(path string, c chan<- EventInfo, events ...Event) error {
if c == nil {
panic("notify: Watch using nil channel")
}
// Expanding with empty event set is a nop.
if len(events) == 0 {
return nil
}
path, isrec, err := cleanpath(path)
if err != nil {
return err
}
eset := joinevents(events)
t.rw.Lock()
defer t.rw.Unlock()
nd := t.root.Add(path)
if isrec {
return t.watchrec(nd, c, eset|recursive)
}
return t.watch(nd, c, eset)
}
func (t *nonrecursiveTree) watch(nd node, c chan<- EventInfo, e Event) (err error) {
diff := nd.Watch.Add(c, e)
switch {
case diff == none:
return nil
case diff[1] == 0:
// TODO(rjeczalik): cleanup this panic after implementation is stable
panic("eset is empty: " + nd.Name)
case diff[0] == 0:
err = t.w.Watch(nd.Name, diff[1])
default:
err = t.w.Rewatch(nd.Name, diff[0], diff[1])
}
if err != nil {
nd.Watch.Del(c, diff.Event())
return err
}
return nil
}
func (t *nonrecursiveTree) recFunc(e Event) walkFunc {
return func(nd node) error {
switch diff := nd.Watch.Add(t.rec, e|omit|Create); {
case diff == none:
case diff[1] == 0:
// TODO(rjeczalik): cleanup this panic after implementation is stable
panic("eset is empty: " + nd.Name)
case diff[0] == 0:
t.w.Watch(nd.Name, diff[1])
default:
t.w.Rewatch(nd.Name, diff[0], diff[1])
}
return nil
}
}
func (t *nonrecursiveTree) watchrec(nd node, c chan<- EventInfo, e Event) error {
var traverse func(walkFunc) error
// Non-recursive tree listens on Create event for every recursive
// watchpoint in order to automagically set a watch for every
// created directory.
switch diff := nd.Watch.dryAdd(t.rec, e|Create); {
case diff == none:
t.watchAdd(nd, c, e)
nd.Watch.Add(t.rec, e|omit|Create)
return nil
case diff[1] == 0:
// TODO(rjeczalik): cleanup this panic after implementation is stable
panic("eset is empty: " + nd.Name)
case diff[0] == 0:
// TODO(rjeczalik): BFS into directories and skip subtree as soon as first
// recursive watchpoint is encountered.
traverse = nd.AddDir
default:
traverse = nd.Walk
}
// TODO(rjeczalik): account every path that failed to be (re)watched
// and retry.
if err := traverse(t.recFunc(e)); err != nil {
return err
}
t.watchAdd(nd, c, e)
return nil
}
type walkWatchpointFunc func(Event, node) error
func (t *nonrecursiveTree) walkWatchpoint(nd node, fn walkWatchpointFunc) error {
type minode struct {
min Event
nd node
}
mnd := minode{nd: nd}
stack := []minode{mnd}
Traverse:
for n := len(stack); n != 0; n = len(stack) {
mnd, stack = stack[n-1], stack[:n-1]
// There can be no recursive watchpoints below if the node has no watchpoints
// itself (every node in a subtree rooted at a recursive watchpoint must
// have at least the nil (total) and t.rec watchpoints).
if len(mnd.nd.Watch) != 0 {
switch err := fn(mnd.min, mnd.nd); err {
case nil:
case errSkip:
continue Traverse
default:
return err
}
}
for _, nd := range mnd.nd.Child {
stack = append(stack, minode{mnd.nd.Watch[t.rec], nd})
}
}
return nil
}
// Stop TODO(rjeczalik)
func (t *nonrecursiveTree) Stop(c chan<- EventInfo) {
fn := func(min Event, nd node) error {
// TODO(rjeczalik): aggregate watcher errors and retry; in worst case
// forward to the user.
switch diff := t.watchDelMin(min, nd, c, all); {
case diff == none:
return nil
case diff[1] == 0:
t.w.Unwatch(nd.Name)
default:
t.w.Rewatch(nd.Name, diff[0], diff[1])
}
return nil
}
t.rw.Lock()
err := t.walkWatchpoint(t.root.nd, fn) // TODO(rjeczalik): store max root per c
t.rw.Unlock()
dbgprintf("Stop(%p) error: %v\n", c, err)
}
// Close TODO(rjeczalik)
func (t *nonrecursiveTree) Close() error {
err := t.w.Close()
close(t.c)
return err
}


@ -0,0 +1,354 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
package notify
import "sync"
// watchAdd TODO(rjeczalik)
func watchAdd(nd node, c chan<- EventInfo, e Event) eventDiff {
diff := nd.Watch.Add(c, e)
if wp := nd.Child[""].Watch; len(wp) != 0 {
e = wp.Total()
diff[0] |= e
diff[1] |= e
if diff[0] == diff[1] {
return none
}
}
return diff
}
// watchAddInactive TODO(rjeczalik)
func watchAddInactive(nd node, c chan<- EventInfo, e Event) eventDiff {
wp := nd.Child[""].Watch
if wp == nil {
wp = make(watchpoint)
nd.Child[""] = node{Watch: wp}
}
diff := wp.Add(c, e)
e = nd.Watch.Total()
diff[0] |= e
diff[1] |= e
if diff[0] == diff[1] {
return none
}
return diff
}
// watchCopy TODO(rjeczalik)
func watchCopy(src, dst node) {
for c, e := range src.Watch {
if c == nil {
continue
}
watchAddInactive(dst, c, e)
}
if wpsrc := src.Child[""].Watch; len(wpsrc) != 0 {
wpdst := dst.Child[""].Watch
for c, e := range wpsrc {
if c == nil {
continue
}
wpdst.Add(c, e)
}
}
}
// watchDel TODO(rjeczalik)
func watchDel(nd node, c chan<- EventInfo, e Event) eventDiff {
diff := nd.Watch.Del(c, e)
if wp := nd.Child[""].Watch; len(wp) != 0 {
diffInactive := wp.Del(c, e)
e = wp.Total()
// TODO(rjeczalik): add e if e != all?
diff[0] |= diffInactive[0] | e
diff[1] |= diffInactive[1] | e
if diff[0] == diff[1] {
return none
}
}
return diff
}
// watchTotal TODO(rjeczalik)
func watchTotal(nd node) Event {
e := nd.Watch.Total()
if wp := nd.Child[""].Watch; len(wp) != 0 {
e |= wp.Total()
}
return e
}
// watchIsRecursive TODO(rjeczalik)
func watchIsRecursive(nd node) bool {
ok := nd.Watch.IsRecursive()
// TODO(rjeczalik): add a test for len(wp) != 0 change the condition.
if wp := nd.Child[""].Watch; len(wp) != 0 {
// If a watchpoint holds inactive watchpoints, it means it's a parent
// one, which is recursive by nature even though it may not be recursive
// itself.
ok = true
}
return ok
}
// recursiveTree TODO(rjeczalik)
type recursiveTree struct {
rw sync.RWMutex // protects root
root root
// TODO(rjeczalik): merge watcher + recursiveWatcher after #5 and #6
w interface {
watcher
recursiveWatcher
}
c chan EventInfo
}
// newRecursiveTree TODO(rjeczalik)
func newRecursiveTree(w recursiveWatcher, c chan EventInfo) *recursiveTree {
t := &recursiveTree{
root: root{nd: newnode("")},
w: struct {
watcher
recursiveWatcher
}{w.(watcher), w},
c: c,
}
go t.dispatch()
return t
}
// dispatch TODO(rjeczalik)
func (t *recursiveTree) dispatch() {
for ei := range t.c {
dbgprintf("dispatching %v on %q", ei.Event(), ei.Path())
go func(ei EventInfo) {
nd, ok := node{}, false
dir, base := split(ei.Path())
fn := func(it node, isbase bool) error {
if isbase {
nd = it
} else {
it.Watch.Dispatch(ei, recursive)
}
return nil
}
t.rw.RLock()
defer t.rw.RUnlock()
// Notify recursive watchpoints found on the path.
if err := t.root.WalkPath(dir, fn); err != nil {
dbgprint("dispatch did not reach leaf:", err)
return
}
// Notify parent watchpoint.
nd.Watch.Dispatch(ei, 0)
// If leaf watchpoint exists, notify it.
if nd, ok = nd.Child[base]; ok {
nd.Watch.Dispatch(ei, 0)
}
}(ei)
}
}
// Watch TODO(rjeczalik)
func (t *recursiveTree) Watch(path string, c chan<- EventInfo, events ...Event) error {
if c == nil {
panic("notify: Watch using nil channel")
}
// Expanding with empty event set is a nop.
if len(events) == 0 {
return nil
}
path, isrec, err := cleanpath(path)
if err != nil {
return err
}
eventset := joinevents(events)
if isrec {
eventset |= recursive
}
t.rw.Lock()
defer t.rw.Unlock()
// case 1: cur is a child
//
// Look for parent watch which already covers the given path.
parent := node{}
self := false
err = t.root.WalkPath(path, func(nd node, isbase bool) error {
if watchTotal(nd) != 0 {
parent = nd
self = isbase
return errSkip
}
return nil
})
cur := t.root.Add(path) // add after the walk, so it's less to traverse
if err == nil && parent.Watch != nil {
// Parent watch found. Register inactive watchpoint, so we have enough
// information to shrink the eventset on eventual Stop.
// return t.resetwatchpoint(parent, parent, c, eventset|inactive)
var diff eventDiff
if self {
diff = watchAdd(cur, c, eventset)
} else {
diff = watchAddInactive(parent, c, eventset)
}
switch {
case diff == none:
// the parent watchpoint already covers requested subtree with its
// eventset
case diff[0] == 0:
// TODO(rjeczalik): cleanup this panic after implementation is stable
panic("dangling watchpoint: " + parent.Name)
default:
if isrec || watchIsRecursive(parent) {
err = t.w.RecursiveRewatch(parent.Name, parent.Name, diff[0], diff[1])
} else {
err = t.w.Rewatch(parent.Name, diff[0], diff[1])
}
if err != nil {
watchDel(parent, c, diff.Event())
return err
}
watchAdd(cur, c, eventset)
// TODO(rjeczalik): account top-most path for c
return nil
}
if !self {
watchAdd(cur, c, eventset)
}
return nil
}
// case 2: cur is new parent
//
// Look for children nodes, unwatch n-1 of them and rewatch the last one.
var children []node
fn := func(nd node) error {
if len(nd.Watch) == 0 {
return nil
}
children = append(children, nd)
return errSkip
}
switch must(cur.Walk(fn)); len(children) {
case 0:
// no child watches, cur holds a new watch
case 1:
watchAdd(cur, c, eventset) // TODO(rjeczalik): update cache c subtree root?
watchCopy(children[0], cur)
err = t.w.RecursiveRewatch(children[0].Name, cur.Name, watchTotal(children[0]),
watchTotal(cur))
if err != nil {
// Clean inactive watchpoint. The c chan did not exist before.
cur.Child[""] = node{}
delete(cur.Watch, c)
return err
}
return nil
default:
watchAdd(cur, c, eventset)
// Copy children inactive watchpoints to the new parent.
for _, nd := range children {
watchCopy(nd, cur)
}
// Watch parent subtree.
if err = t.w.RecursiveWatch(cur.Name, watchTotal(cur)); err != nil {
// Clean inactive watchpoint. The c chan did not exist before.
cur.Child[""] = node{}
delete(cur.Watch, c)
return err
}
// Unwatch children subtrees.
var e error
for _, nd := range children {
if watchIsRecursive(nd) {
e = t.w.RecursiveUnwatch(nd.Name)
} else {
e = t.w.Unwatch(nd.Name)
}
if e != nil {
err = nonil(err, e)
// TODO(rjeczalik): child is still watched, warn all its watchpoints
// about possible duplicate events via Error event
}
}
return err
}
// case 3: cur is new, alone node
switch diff := watchAdd(cur, c, eventset); {
case diff == none:
// TODO(rjeczalik): cleanup this panic after implementation is stable
panic("watch requested but no parent watchpoint found: " + cur.Name)
case diff[0] == 0:
if isrec {
err = t.w.RecursiveWatch(cur.Name, diff[1])
} else {
err = t.w.Watch(cur.Name, diff[1])
}
if err != nil {
watchDel(cur, c, diff.Event())
return err
}
default:
// TODO(rjeczalik): cleanup this panic after implementation is stable
panic("watch requested but no parent watchpoint found: " + cur.Name)
}
return nil
}
// Stop TODO(rjeczalik)
//
// TODO(rjeczalik): Split parent watchpoint - transfer watches to children
// if parent is no longer needed. This carries a risk that underlying
// watcher calls could fail - reconsider if it's worth the effort.
func (t *recursiveTree) Stop(c chan<- EventInfo) {
var err error
fn := func(nd node) (e error) {
diff := watchDel(nd, c, all)
switch {
case diff == none && watchTotal(nd) == 0:
// TODO(rjeczalik): There's no watchpoints deeper in the tree,
// probably we should remove the nodes as well.
return nil
case diff == none:
// Removing c from nd does not require shrinking its eventset.
case diff[1] == 0:
if watchIsRecursive(nd) {
e = t.w.RecursiveUnwatch(nd.Name)
} else {
e = t.w.Unwatch(nd.Name)
}
default:
if watchIsRecursive(nd) {
e = t.w.RecursiveRewatch(nd.Name, nd.Name, diff[0], diff[1])
} else {
e = t.w.Rewatch(nd.Name, diff[0], diff[1])
}
}
fn := func(nd node) error {
watchDel(nd, c, all)
return nil
}
err = nonil(err, e, nd.Walk(fn))
// TODO(rjeczalik): if e != nil store dummy chan in nd.Watch just to
// retry un/rewatching next time and/or let the user handle the failure
// via an Error event?
return errSkip
}
t.rw.Lock()
e := t.root.Walk("", fn) // TODO(rjeczalik): use max root per c
t.rw.Unlock()
if e != nil {
err = nonil(err, e)
}
dbgprintf("Stop(%p) error: %v\n", c, err)
}
// Close TODO(rjeczalik)
func (t *recursiveTree) Close() error {
err := t.w.Close()
close(t.c)
return err
}


@ -0,0 +1,150 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
package notify
import (
"errors"
"os"
"path/filepath"
"strings"
)
const all = ^Event(0)
const sep = string(os.PathSeparator)
var errDepth = errors.New("exceeded allowed iteration count (circular symlink?)")
func min(i, j int) int {
if i > j {
return j
}
return i
}
func max(i, j int) int {
if i < j {
return j
}
return i
}
// must panics if err is non-nil.
func must(err error) {
if err != nil {
panic(err)
}
}
// nonil gives first non-nil error from the given arguments.
func nonil(err ...error) error {
for _, err := range err {
if err != nil {
return err
}
}
return nil
}
func cleanpath(path string) (realpath string, isrec bool, err error) {
if strings.HasSuffix(path, "...") {
isrec = true
path = path[:len(path)-3]
}
if path, err = filepath.Abs(path); err != nil {
return "", false, err
}
if path, err = canonical(path); err != nil {
return "", false, err
}
return path, isrec, nil
}
// canonical resolves any symlinks in the given path and returns it in a clean form.
// It expects the path to be absolute. It guards against circular symlinks by
// maintaining a simple iteration limit and failing once that limit is exceeded.
func canonical(p string) (string, error) {
p, err := filepath.Abs(p)
if err != nil {
return "", err
}
for i, j, depth := 1, 0, 1; i < len(p); i, depth = i+1, depth+1 {
if depth > 128 {
return "", &os.PathError{Op: "canonical", Path: p, Err: errDepth}
}
if j = strings.IndexRune(p[i:], '/'); j == -1 {
j, i = i, len(p)
} else {
j, i = i, i+j
}
fi, err := os.Lstat(p[:i])
if err != nil {
return "", err
}
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
s, err := os.Readlink(p[:i])
if err != nil {
return "", err
}
if filepath.IsAbs(s) {
p = "/" + s + p[i:]
} else {
p = p[:j] + s + p[i:]
}
i = 1 // no guarantee s is canonical, start all over
}
}
return filepath.Clean(p), nil
}
func joinevents(events []Event) (e Event) {
if len(events) == 0 {
e = All
} else {
for _, event := range events {
e |= event
}
}
return
}
func split(s string) (string, string) {
if i := lastIndexSep(s); i != -1 {
return s[:i], s[i+1:]
}
return "", s
}
func base(s string) string {
if i := lastIndexSep(s); i != -1 {
return s[i+1:]
}
return s
}
func indexbase(root, name string) int {
if n, m := len(root), len(name); m >= n && name[:n] == root &&
(n == m || name[n] == os.PathSeparator) {
return min(n+1, m)
}
return -1
}
func indexSep(s string) int {
for i := 0; i < len(s); i++ {
if s[i] == os.PathSeparator {
return i
}
}
return -1
}
func lastIndexSep(s string) int {
for i := len(s) - 1; i >= 0; i-- {
if s[i] == os.PathSeparator {
return i
}
}
return -1
}
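
To illustrate a couple of the helpers above, a hedged, package-internal sketch (results shown for a Unix-like path separator; cleanpath additionally recognises the "..." suffix as a recursive request and strips it):

```go
// Inside package notify (the helpers are unexported):
e := joinevents(nil) // an empty event list defaults to All
_ = e == All         // true

dir, file := split("/tmp/a/b.txt") // -> "/tmp/a", "b.txt"
_, _ = dir, file
```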


@ -0,0 +1,85 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
package notify
import "errors"
var (
errAlreadyWatched = errors.New("path is already watched")
errNotWatched = errors.New("path is not being watched")
errInvalidEventSet = errors.New("invalid event set provided")
)
// Watcher is an intermediate interface for wrapping inotify, ReadDirectoryChangesW,
// FSEvents, kqueue and poller implementations.
//
// The watcher implementation is expected to do its own mapping between paths and
// created watchers if the underlying event notification does not support it. For
// ease of implementation it is guaranteed that paths provided via the Watch and
// Unwatch methods are absolute and clean.
type watcher interface {
// Watch requests a watcher creation for the given path and given event set.
Watch(path string, event Event) error
// Unwatch requests a watcher deletion for the given path and given event set.
Unwatch(path string) error
// Rewatch provides functionality for modifying existing watch-points, like
// expanding their event sets.
//
// Rewatch modifies the existing watch-point for the given path. It passes
// the event set currently registered for the given path, and the
// new, requested event set.
//
// It is guaranteed that Tree will not pass to Rewatch zero value for any
// of its arguments. If old == new and watcher can be upgraded to
// recursiveWatcher interface, a watch for the corresponding path is expected
// to be changed from recursive to the non-recursive one.
Rewatch(path string, old, new Event) error
// Close unwatches all paths that are registered. When Close returns, it
// is expected it will report no more events.
Close() error
}
// recursiveWatcher is an interface for watchers on those OSes which support
// recursive watching over directories.
type recursiveWatcher interface {
RecursiveWatch(path string, event Event) error
// RecursiveUnwatch removes a recursive watch-point given by the path. For
// a native recursive implementation there is no difference in functionality
// between Unwatch and RecursiveUnwatch; however, for those platforms that
// require emulation of recursive watch-points, the implementation differs.
RecursiveUnwatch(path string) error
// RecursiveRewatch provides functionality for modifying and/or relocating
// existing recursive watch-points.
//
// To relocate a watch-point means to unwatch oldpath and set a watch-point on
// newpath.
//
// To modify a watch-point means either to expand or shrink its event set.
//
// Tree may want to relocate, modify, or both relocate and modify a watch-point
// via a single RecursiveRewatch call.
//
// If oldpath == newpath, the watch-point is expected to change its event set value
// from oldevent to newevent.
//
// If oldevent == newevent, the watch-point is expected to relocate from oldpath
// to newpath.
//
// If oldpath != newpath and oldevent != newevent, the watch-point is expected
// to relocate from oldpath to newpath first and then change its event set
// value from oldevent to newevent. In other words the end result must be
// a watch-point set on newpath with newevent as its event set value.
//
// It is guaranteed that Tree will not pass a zero value to RecursiveRewatch
// for any of its arguments. If oldpath == newpath and oldevent == newevent,
// a watch for the corresponding path is expected to be changed from a
// non-recursive to a recursive one.
RecursiveRewatch(oldpath, newpath string, oldevent, newevent Event) error
}
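To make the contract concrete, here is a do-nothing sketch of the watcher interface above (hypothetical; it assumes it is compiled as part of package notify):
// noopWatcher accepts every request and never reports an event. It only
// illustrates the shape of the interface; real backends live in the
// platform-specific files.
type noopWatcher struct{}

func (noopWatcher) Watch(path string, event Event) error      { return nil }
func (noopWatcher) Unwatch(path string) error                 { return nil }
func (noopWatcher) Rewatch(path string, old, new Event) error { return nil }
func (noopWatcher) Close() error                              { return nil }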

View File

@ -0,0 +1,170 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build solaris
package notify
import (
"fmt"
"os"
"syscall"
)
// newTrigger returns an implementation of trigger.
func newTrigger(pthLkp map[string]*watched) trigger {
return &fen{
pthLkp: pthLkp,
cf: newCfen(),
}
}
// fen is a structure implementing trigger for FEN.
type fen struct {
// p is a FEN port identifier
p int
// pthLkp is a structure mapping monitored files/dirs to data about them,
// shared with the parent trg structure
pthLkp map[string]*watched
// cf wraps C operations for FEN
cf cfen
}
// watched is a data structure representing watched file/directory.
type watched struct {
// p is a path to watched file/directory
p string
// fi provides information about watched file/dir
fi os.FileInfo
// eDir represents events watched directly
eDir Event
// eNonDir represents events watched indirectly
eNonDir Event
}
// Stop implements trigger.
func (f *fen) Stop() error {
return f.cf.port_alert(f.p)
}
// Close implements trigger.
func (f *fen) Close() (err error) {
return syscall.Close(f.p)
}
// NewWatched implements trigger.
func (*fen) NewWatched(p string, fi os.FileInfo) (*watched, error) {
return &watched{p: p, fi: fi}, nil
}
// Record implements trigger.
func (f *fen) Record(w *watched) {
f.pthLkp[w.p] = w
}
// Del implements trigger.
func (f *fen) Del(w *watched) {
delete(f.pthLkp, w.p)
}
func inter2pe(n interface{}) PortEvent {
pe, ok := n.(PortEvent)
if !ok {
panic(fmt.Sprintf("fen: type should be PortEvent, %T instead", n))
}
return pe
}
// Watched implements trigger.
func (f *fen) Watched(n interface{}) (*watched, int64, error) {
pe := inter2pe(n)
fo, ok := pe.PortevObject.(*FileObj)
if !ok || fo == nil {
panic(fmt.Sprintf("fen: type should be *FileObj, %T instead", fo))
}
w, ok := f.pthLkp[fo.Name]
if !ok {
return nil, 0, errNotWatched
}
return w, int64(pe.PortevEvents), nil
}
// Init implements trigger. It initializes FEN.
func (f *fen) Init() (err error) {
f.p, err = f.cf.port_create()
return
}
func fi2fo(fi os.FileInfo, p string) FileObj {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
panic(fmt.Sprintf("fen: type should be *syscall.Stat_t, %T instead", st))
}
return FileObj{Name: p, Atim: st.Atim, Mtim: st.Mtim, Ctim: st.Ctim}
}
// Unwatch implements trigger.
func (f *fen) Unwatch(w *watched) error {
return f.cf.port_dissociate(f.p, FileObj{Name: w.p})
}
// Watch implements trigger.
func (f *fen) Watch(fi os.FileInfo, w *watched, e int64) error {
return f.cf.port_associate(f.p, fi2fo(fi, w.p), int(e))
}
// Wait implements trigger.
func (f *fen) Wait() (interface{}, error) {
var (
pe PortEvent
err error
)
err = f.cf.port_get(f.p, &pe)
return pe, err
}
// IsStop implements trigger.
func (f *fen) IsStop(n interface{}, err error) bool {
return err == syscall.EBADF || inter2pe(n).PortevSource == srcAlert
}
func init() {
encode = func(e Event) (o int64) {
// Create event is not supported by FEN. Instead a FileModified event will
// be registered. If this event is reported on a dir which is to be
// monitored for Create, the dir will be rescanned and Create events will
// be generated and returned for new files. In the case of files, if a
// FileModified event is reported but was not requested, it will be ignored.
if e&Create != 0 {
o = (o &^ int64(Create)) | int64(FileModified)
}
if e&Write != 0 {
o = (o &^ int64(Write)) | int64(FileModified)
}
// The following events are 'exception events' and as such cannot be requested
// explicitly for monitoring or filtered out. If they are reported by FEN
// but were not subscribed to by the user, they will be filtered out by the
// watcher's logic.
o &= int64(^Rename & ^Remove &^ FileDelete &^ FileRenameTo &^
FileRenameFrom &^ Unmounted &^ MountedOver)
return
}
nat2not = map[Event]Event{
FileModified: Write,
FileRenameFrom: Rename,
FileDelete: Remove,
FileAccess: Event(0),
FileAttrib: Event(0),
FileRenameTo: Event(0),
FileTrunc: Event(0),
FileNoFollow: Event(0),
Unmounted: Event(0),
MountedOver: Event(0),
}
not2nat = map[Event]Event{
Write: FileModified,
Rename: FileRenameFrom,
Remove: FileDelete,
}
}

View File

@ -0,0 +1,141 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build solaris
package notify
// #include <port.h>
// #include <stdio.h>
// #include <stdlib.h>
// struct file_obj* newFo() { return (struct file_obj*) malloc(sizeof(struct file_obj)); }
// port_event_t* newPe() { return (port_event_t*) malloc(sizeof(port_event_t)); }
// uintptr_t conv(struct file_obj* fo) { return (uintptr_t) fo; }
// struct file_obj* dconv(uintptr_t fo) { return (struct file_obj*) fo; }
import "C"
import (
"syscall"
"unsafe"
)
const (
fileAccess = Event(C.FILE_ACCESS)
fileModified = Event(C.FILE_MODIFIED)
fileAttrib = Event(C.FILE_ATTRIB)
fileDelete = Event(C.FILE_DELETE)
fileRenameTo = Event(C.FILE_RENAME_TO)
fileRenameFrom = Event(C.FILE_RENAME_FROM)
fileTrunc = Event(C.FILE_TRUNC)
fileNoFollow = Event(C.FILE_NOFOLLOW)
unmounted = Event(C.UNMOUNTED)
mountedOver = Event(C.MOUNTEDOVER)
)
// PortEvent is notify's equivalent of port_event_t.
type PortEvent struct {
PortevEvents int // PortevEvents is an equivalent of portev_events.
PortevSource uint8 // PortevSource is an equivalent of portev_source.
PortevPad uint8 // Portevpad is an equivalent of portev_pad.
PortevObject interface{} // PortevObject is an equivalent of portev_object.
PortevUser uintptr // PortevUser is an equivalent of portev_user.
}
// FileObj is notify's equivalent of file_obj.
type FileObj struct {
Atim syscall.Timespec // Atim is an equivalent of fo_atime.
Mtim syscall.Timespec // Mtim is an equivalent of fo_mtime.
Ctim syscall.Timespec // Ctim is an equivalent of fo_ctime.
Pad [3]uintptr // Pad is an equivalent of fo_pad.
Name string // Name is an equivalent of fo_name.
}
type cfen struct {
p2pe map[string]*C.port_event_t
p2fo map[string]*C.struct_file_obj
}
func newCfen() cfen {
return cfen{
p2pe: make(map[string]*C.port_event_t),
p2fo: make(map[string]*C.struct_file_obj),
}
}
func unix2C(sec int64, nsec int64) (C.time_t, C.long) {
return C.time_t(sec), C.long(nsec)
}
func (c *cfen) port_associate(p int, fo FileObj, e int) (err error) {
cfo := C.newFo()
cfo.fo_atime.tv_sec, cfo.fo_atime.tv_nsec = unix2C(fo.Atim.Unix())
cfo.fo_mtime.tv_sec, cfo.fo_mtime.tv_nsec = unix2C(fo.Mtim.Unix())
cfo.fo_ctime.tv_sec, cfo.fo_ctime.tv_nsec = unix2C(fo.Ctim.Unix())
cfo.fo_name = C.CString(fo.Name)
c.p2fo[fo.Name] = cfo
_, err = C.port_associate(C.int(p), srcFile, C.conv(cfo), C.int(e), nil)
return
}
func (c *cfen) port_dissociate(port int, fo FileObj) (err error) {
cfo, ok := c.p2fo[fo.Name]
if !ok {
return errNotWatched
}
_, err = C.port_dissociate(C.int(port), srcFile, C.conv(cfo))
C.free(unsafe.Pointer(cfo.fo_name))
C.free(unsafe.Pointer(cfo))
delete(c.p2fo, fo.Name)
return
}
const srcAlert = C.PORT_SOURCE_ALERT
const srcFile = C.PORT_SOURCE_FILE
const alertSet = C.PORT_ALERT_SET
func cfo2fo(cfo *C.struct_file_obj) *FileObj {
// Currently remaining attributes are not used.
if cfo == nil {
return nil
}
var fo FileObj
fo.Name = C.GoString(cfo.fo_name)
return &fo
}
func (c *cfen) port_get(port int, pe *PortEvent) (err error) {
cpe := C.newPe()
if _, err = C.port_get(C.int(port), cpe, nil); err != nil {
C.free(unsafe.Pointer(cpe))
return
}
pe.PortevEvents, pe.PortevSource, pe.PortevPad =
int(cpe.portev_events), uint8(cpe.portev_source), uint8(cpe.portev_pad)
pe.PortevObject = cfo2fo(C.dconv(cpe.portev_object))
pe.PortevUser = uintptr(cpe.portev_user)
C.free(unsafe.Pointer(cpe))
return
}
func (c *cfen) port_create() (int, error) {
p, err := C.port_create()
return int(p), err
}
func (c *cfen) port_alert(p int) (err error) {
_, err = C.port_alert(C.int(p), alertSet, C.int(666), nil)
return
}
func (c *cfen) free() {
for i := range c.p2fo {
C.free(unsafe.Pointer(c.p2fo[i].fo_name))
C.free(unsafe.Pointer(c.p2fo[i]))
}
for i := range c.p2pe {
C.free(unsafe.Pointer(c.p2pe[i]))
}
c.p2fo = make(map[string]*C.struct_file_obj)
c.p2pe = make(map[string]*C.port_event_t)
}

View File

@ -0,0 +1,319 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build darwin,!kqueue
package notify
import (
"errors"
"strings"
"sync/atomic"
)
// TODO(rjeczalik): get rid of calls to canonical, it's tree responsibility
const (
failure = uint32(FSEventsMustScanSubDirs | FSEventsUserDropped | FSEventsKernelDropped)
filter = uint32(FSEventsCreated | FSEventsRemoved | FSEventsRenamed |
FSEventsModified | FSEventsInodeMetaMod)
)
// FSEvent represents a single file event. It is created out of values passed by
// FSEvents to the FSEventStreamCallback function.
type FSEvent struct {
Path string // real path of the file or directory
ID uint64 // ID of the event (FSEventStreamEventId)
Flags uint32 // joint FSEvents* flags (FSEventStreamEventFlags)
}
// splitflags separates event flags from a single set into a slice of individual flags.
func splitflags(set uint32) (e []uint32) {
for i := uint32(1); set != 0; i, set = i<<1, set>>1 {
if (set & 1) != 0 {
e = append(e, i)
}
}
return
}
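For example (a package-internal sketch; FSEventsCreated and FSEventsModified are the package's FSEvents flag constants):
// splitflags returns the individual bits of the mask, lowest set bit first.
var parts = splitflags(uint32(FSEventsCreated | FSEventsModified))
// parts now holds two elements, one per set flag.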
// watch represents a filesystem watchpoint. It is a higher level abstraction
// over an FSEvents stream, which implements filtering of file events based
// on path and event set. It emulates a non-recursive watch-point by filtering out
// events whose paths are more than one level deeper than the watched path.
type watch struct {
// prev stores the last event set per path in order to filter out old flags
// for new events, which apparently FSEvents likes to retain. It's a disgusting
// hack; it should be researched how to get rid of it.
prev map[string]uint32
c chan<- EventInfo
stream *stream
path string
events uint32
isrec int32
flushed bool
}
// Example format:
//
// ~ $ (trigger command) # (event set) -> (effective event set)
//
// Heuristics:
//
// 1. Create event is removed when it was present in previous event set.
// Example:
//
// ~ $ echo > file # Create|Write -> Create|Write
// ~ $ echo > file # Create|Write|InodeMetaMod -> Write|InodeMetaMod
//
// 2. Remove event is removed if it was present in the previous event set.
// Example:
//
// ~ $ touch file # Create -> Create
// ~ $ rm file # Create|Remove -> Remove
// ~ $ touch file # Create|Remove -> Create
//
// 3. Write event is removed if not followed by InodeMetaMod on existing
// file. Example:
//
// ~ $ echo > file # Create|Write -> Create|Write
// ~ $ chmod +x file # Create|Write|ChangeOwner -> ChangeOwner
//
// 4. Write&InodeMetaMod is removed when the effective event set contains a Remove event.
// Example:
//
// ~ $ echo > file # Write|InodeMetaMod -> Write|InodeMetaMod
// ~ $ rm file # Remove|Write|InodeMetaMod -> Remove
//
func (w *watch) strip(base string, set uint32) uint32 {
const (
write = FSEventsModified | FSEventsInodeMetaMod
both = FSEventsCreated | FSEventsRemoved
)
switch w.prev[base] {
case FSEventsCreated:
set &^= FSEventsCreated
if set&FSEventsRemoved != 0 {
w.prev[base] = FSEventsRemoved
set &^= write
}
case FSEventsRemoved:
set &^= FSEventsRemoved
if set&FSEventsCreated != 0 {
w.prev[base] = FSEventsCreated
}
default:
switch set & both {
case FSEventsCreated:
w.prev[base] = FSEventsCreated
case FSEventsRemoved:
w.prev[base] = FSEventsRemoved
set &^= write
}
}
dbgprintf("split()=%v\n", Event(set))
return set
}
// Dispatch is a stream function which forwards the given file events for the watched
// path to the underlying EventInfo channel.
func (w *watch) Dispatch(ev []FSEvent) {
events := atomic.LoadUint32(&w.events)
isrec := (atomic.LoadInt32(&w.isrec) == 1)
for i := range ev {
if ev[i].Flags&FSEventsHistoryDone != 0 {
w.flushed = true
continue
}
if !w.flushed {
continue
}
dbgprintf("%v (0x%x) (%s, i=%d, ID=%d, len=%d)\n", Event(ev[i].Flags),
ev[i].Flags, ev[i].Path, i, ev[i].ID, len(ev))
if ev[i].Flags&failure != 0 {
// TODO(rjeczalik): missing error handling
panic("unhandled error: " + Event(ev[i].Flags).String())
}
if !strings.HasPrefix(ev[i].Path, w.path) {
continue
}
n := len(w.path)
base := ""
if len(ev[i].Path) > n {
if ev[i].Path[n] != '/' {
continue
}
base = ev[i].Path[n+1:]
if !isrec && strings.IndexByte(base, '/') != -1 {
continue
}
}
// TODO(rjeczalik): get diff only from filtered events?
e := w.strip(string(base), ev[i].Flags) & events
if e == 0 {
continue
}
for _, e := range splitflags(e) {
dbgprintf("%d: single event: %v", ev[i].ID, Event(e))
w.c <- &event{
fse: ev[i],
event: Event(e),
}
}
}
}
// Stop closes underlying FSEvents stream and stops dispatching events.
func (w *watch) Stop() {
w.stream.Stop()
// TODO(rjeczalik): make (*stream).Stop flush synchronously undelivered events,
// so the following hack can be removed. It should flush all the streams
// concurrently as we care not to block too much here.
atomic.StoreUint32(&w.events, 0)
atomic.StoreInt32(&w.isrec, 0)
}
// fsevents implements the Watcher and RecursiveWatcher interfaces, backed by the
// FSEvents framework.
type fsevents struct {
watches map[string]*watch
c chan<- EventInfo
}
func newWatcher(c chan<- EventInfo) watcher {
return &fsevents{
watches: make(map[string]*watch),
c: c,
}
}
func (fse *fsevents) watch(path string, event Event, isrec int32) (err error) {
if path, err = canonical(path); err != nil {
return err
}
if _, ok := fse.watches[path]; ok {
return errAlreadyWatched
}
w := &watch{
prev: make(map[string]uint32),
c: fse.c,
path: path,
events: uint32(event),
isrec: isrec,
}
w.stream = newStream(path, w.Dispatch)
if err = w.stream.Start(); err != nil {
return err
}
fse.watches[path] = w
return nil
}
func (fse *fsevents) unwatch(path string) (err error) {
if path, err = canonical(path); err != nil {
return
}
w, ok := fse.watches[path]
if !ok {
return errNotWatched
}
w.stream.Stop()
delete(fse.watches, path)
return nil
}
// Watch implements Watcher interface. It fails with non-nil error when setting
// the watch-point by FSEvents fails or with errAlreadyWatched error when
// the given path is already watched.
func (fse *fsevents) Watch(path string, event Event) error {
return fse.watch(path, event, 0)
}
// Unwatch implements Watcher interface. It fails with errNotWatched when
// the given path is not being watched.
func (fse *fsevents) Unwatch(path string) error {
return fse.unwatch(path)
}
// Rewatch implements Watcher interface. It fails with errNotWatched when
// the given path is not being watched or with errInvalidEventSet when oldevent
// does not match event set the watch-point currently holds.
func (fse *fsevents) Rewatch(path string, oldevent, newevent Event) error {
w, ok := fse.watches[path]
if !ok {
return errNotWatched
}
if !atomic.CompareAndSwapUint32(&w.events, uint32(oldevent), uint32(newevent)) {
return errInvalidEventSet
}
atomic.StoreInt32(&w.isrec, 0)
return nil
}
// RecursiveWatch implements RecursiveWatcher interface. It fails with non-nil
// error when setting the watch-point by FSEvents fails or with errAlreadyWatched
// error when the given path is already watched.
func (fse *fsevents) RecursiveWatch(path string, event Event) error {
return fse.watch(path, event, 1)
}
// RecursiveUnwatch implements RecursiveWatcher interface. It fails with
// errNotWatched when the given path is not being watched.
//
// TODO(rjeczalik): fail if w.isrec == 0?
func (fse *fsevents) RecursiveUnwatch(path string) error {
return fse.unwatch(path)
}
// RecursiveRewatch implements RecursiveWatcher interface. It fails:
//
// * with errNotWatched when the given path is not being watched
// * with errInvalidEventSet when oldevent does not match the current event set
// * with errAlreadyWatched when the watch-point given by the oldpath was meant to
// be relocated to newpath, but the newpath is already watched
// * with a non-nil error when setting the watch-point with FSEvents fails
//
// TODO(rjeczalik): Improve handling of watch-point relocation? See the two TODOs
// that follow.
func (fse *fsevents) RecursiveRewatch(oldpath, newpath string, oldevent, newevent Event) error {
switch [2]bool{oldpath == newpath, oldevent == newevent} {
case [2]bool{true, true}:
w, ok := fse.watches[oldpath]
if !ok {
return errNotWatched
}
atomic.StoreInt32(&w.isrec, 1)
return nil
case [2]bool{true, false}:
w, ok := fse.watches[oldpath]
if !ok {
return errNotWatched
}
if !atomic.CompareAndSwapUint32(&w.events, uint32(oldevent), uint32(newevent)) {
return errors.New("invalid event state diff")
}
atomic.StoreInt32(&w.isrec, 1)
return nil
default:
// TODO(rjeczalik): rewatch newpath only if exists?
// TODO(rjeczalik): migrate w.prev to new watch?
if _, ok := fse.watches[newpath]; ok {
return errAlreadyWatched
}
if err := fse.Unwatch(oldpath); err != nil {
return err
}
// TODO(rjeczalik): revert unwatch if watch fails?
return fse.watch(newpath, newevent, 1)
}
}
// Close unwatches all watch-points.
func (fse *fsevents) Close() error {
for _, w := range fse.watches {
w.Stop()
}
fse.watches = nil
return nil
}

View File

@ -0,0 +1,190 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build darwin,!kqueue
package notify
/*
#include <CoreServices/CoreServices.h>
typedef void (*CFRunLoopPerformCallBack)(void*);
void gosource(void *);
void gostream(uintptr_t, uintptr_t, size_t, uintptr_t, uintptr_t, uintptr_t);
static FSEventStreamRef EventStreamCreate(FSEventStreamContext * context, uintptr_t info, CFArrayRef paths, FSEventStreamEventId since, CFTimeInterval latency, FSEventStreamCreateFlags flags) {
context->info = (void*) info;
return FSEventStreamCreate(NULL, (FSEventStreamCallback) gostream, context, paths, since, latency, flags);
}
#cgo LDFLAGS: -framework CoreServices
*/
import "C"
import (
"errors"
"os"
"sync"
"sync/atomic"
"time"
"unsafe"
)
var nilstream C.FSEventStreamRef
// Default arguments for FSEventStreamCreate function.
var (
latency C.CFTimeInterval
flags = C.FSEventStreamCreateFlags(C.kFSEventStreamCreateFlagFileEvents | C.kFSEventStreamCreateFlagNoDefer)
since = uint64(C.FSEventsGetCurrentEventId())
)
var runloop C.CFRunLoopRef // global runloop which all streams are registered with
var wg sync.WaitGroup // used to wait until the runloop starts
// source is used for synchronization purposes - it signals via the wg when the
// runloop has started and is ready. It also serves the purpose of a dummy source;
// thanks to it the runloop does not return, as it always has at least one source
// registered.
var source = C.CFRunLoopSourceCreate(nil, 0, &C.CFRunLoopSourceContext{
perform: (C.CFRunLoopPerformCallBack)(C.gosource),
})
// Errors returned when FSEvents functions fail.
var (
errCreate = os.NewSyscallError("FSEventStreamCreate", errors.New("NULL"))
errStart = os.NewSyscallError("FSEventStreamStart", errors.New("false"))
)
// initializes the global runloop and ensures any created stream awaits its
// readiness.
func init() {
wg.Add(1)
go func() {
runloop = C.CFRunLoopGetCurrent()
C.CFRunLoopAddSource(runloop, source, C.kCFRunLoopDefaultMode)
C.CFRunLoopRun()
panic("runloop has just unexpectedly stopped")
}()
C.CFRunLoopSourceSignal(source)
}
//export gosource
func gosource(unsafe.Pointer) {
time.Sleep(time.Second)
wg.Done()
}
//export gostream
func gostream(_, info uintptr, n C.size_t, paths, flags, ids uintptr) {
const (
offchar = unsafe.Sizeof((*C.char)(nil))
offflag = unsafe.Sizeof(C.FSEventStreamEventFlags(0))
offid = unsafe.Sizeof(C.FSEventStreamEventId(0))
)
if n == 0 {
return
}
ev := make([]FSEvent, 0, int(n))
for i := uintptr(0); i < uintptr(n); i++ {
switch flags := *(*uint32)(unsafe.Pointer((flags + i*offflag))); {
case flags&uint32(FSEventsEventIdsWrapped) != 0:
atomic.StoreUint64(&since, uint64(C.FSEventsGetCurrentEventId()))
default:
ev = append(ev, FSEvent{
Path: C.GoString(*(**C.char)(unsafe.Pointer(paths + i*offchar))),
Flags: flags,
ID: *(*uint64)(unsafe.Pointer(ids + i*offid)),
})
}
}
streamFuncs.get(info)(ev)
}
// streamFunc is a callback called when a stream receives file events.
type streamFunc func([]FSEvent)
var streamFuncs = streamFuncRegistry{m: map[uintptr]streamFunc{}}
type streamFuncRegistry struct {
mu sync.Mutex
m map[uintptr]streamFunc
i uintptr
}
func (r *streamFuncRegistry) get(id uintptr) streamFunc {
r.mu.Lock()
defer r.mu.Unlock()
return r.m[id]
}
func (r *streamFuncRegistry) add(fn streamFunc) uintptr {
r.mu.Lock()
defer r.mu.Unlock()
r.i++
r.m[r.i] = fn
return r.i
}
func (r *streamFuncRegistry) delete(id uintptr) {
r.mu.Lock()
defer r.mu.Unlock()
delete(r.m, id)
}
// stream represents a single watch-point which listens for events scheduled by
// the global runloop.
type stream struct {
path string
ref C.FSEventStreamRef
info uintptr
}
// newStream creates a stream for the given path, listening for file events and
// calling fn upon receiving any.
func newStream(path string, fn streamFunc) *stream {
return &stream{
path: path,
info: streamFuncs.add(fn),
}
}
// Start creates an FSEventStream for the given path and schedules it with the
// global runloop. It's a no-op if the stream was already started.
func (s *stream) Start() error {
if s.ref != nilstream {
return nil
}
wg.Wait()
p := C.CFStringCreateWithCStringNoCopy(nil, C.CString(s.path), C.kCFStringEncodingUTF8, nil)
path := C.CFArrayCreate(nil, (*unsafe.Pointer)(unsafe.Pointer(&p)), 1, nil)
ctx := C.FSEventStreamContext{}
ref := C.EventStreamCreate(&ctx, C.uintptr_t(s.info), path, C.FSEventStreamEventId(atomic.LoadUint64(&since)), latency, flags)
if ref == nilstream {
return errCreate
}
C.FSEventStreamScheduleWithRunLoop(ref, runloop, C.kCFRunLoopDefaultMode)
if C.FSEventStreamStart(ref) == C.Boolean(0) {
C.FSEventStreamInvalidate(ref)
return errStart
}
C.CFRunLoopWakeUp(runloop)
s.ref = ref
return nil
}
// Stop stops underlying FSEventStream and unregisters it from global runloop.
func (s *stream) Stop() {
if s.ref == nilstream {
return
}
wg.Wait()
C.FSEventStreamStop(s.ref)
C.FSEventStreamInvalidate(s.ref)
C.CFRunLoopWakeUp(runloop)
s.ref = nilstream
streamFuncs.delete(s.info)
}

View File

@ -0,0 +1,396 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build linux
package notify
import (
"bytes"
"errors"
"path/filepath"
"runtime"
"sync"
"sync/atomic"
"syscall"
"unsafe"
)
// eventBufferSize defines the size of the buffer given to the read(2) function. One
// should not depend on this value, since it was arbitrarily chosen and may be
// changed in the future.
const eventBufferSize = 64 * (syscall.SizeofInotifyEvent + syscall.PathMax + 1)
// consumersCount defines the number of consumers in producer-consumer based
// implementation. Each consumer is run in a separate goroutine and has read
// access to watched files map.
const consumersCount = 2
const invalidDescriptor = -1
// watched is a pair of file path and inotify mask used as a value in
// watched files map.
type watched struct {
path string
mask uint32
}
// inotify implements Watcher interface.
type inotify struct {
sync.RWMutex // protects inotify.m map
m map[int32]*watched // watch descriptor to watched object
fd int32 // inotify file descriptor
pipefd []int // pipe's read and write descriptors
epfd int // epoll descriptor
epes []syscall.EpollEvent // epoll events
buffer [eventBufferSize]byte // inotify event buffer
wg sync.WaitGroup // wait group used to close main loop
c chan<- EventInfo // event dispatcher channel
}
// newWatcher creates a new non-recursive watcher backed by inotify.
func newWatcher(c chan<- EventInfo) watcher {
i := &inotify{
m: make(map[int32]*watched),
fd: invalidDescriptor,
pipefd: []int{invalidDescriptor, invalidDescriptor},
epfd: invalidDescriptor,
epes: make([]syscall.EpollEvent, 0),
c: c,
}
runtime.SetFinalizer(i, func(i *inotify) {
i.epollclose()
if i.fd != invalidDescriptor {
syscall.Close(int(i.fd))
}
})
return i
}
// Watch implements notify.watcher interface.
func (i *inotify) Watch(path string, e Event) error {
return i.watch(path, e)
}
// Rewatch implements notify.watcher interface.
func (i *inotify) Rewatch(path string, _, newevent Event) error {
return i.watch(path, newevent)
}
// watch adds a new watcher to the set of watched objects or modifies an existing
// one. If called for the first time, this function initializes the inotify filesystem
// monitor and starts the producer and consumer goroutines.
func (i *inotify) watch(path string, e Event) (err error) {
if e&^(All|Event(syscall.IN_ALL_EVENTS)) != 0 {
return errors.New("notify: unknown event")
}
if err = i.lazyinit(); err != nil {
return
}
iwd, err := syscall.InotifyAddWatch(int(i.fd), path, encode(e))
if err != nil {
return
}
i.RLock()
wd := i.m[int32(iwd)]
i.RUnlock()
if wd == nil {
i.Lock()
if i.m[int32(iwd)] == nil {
i.m[int32(iwd)] = &watched{path: path, mask: uint32(e)}
}
i.Unlock()
} else {
i.Lock()
wd.mask = uint32(e)
i.Unlock()
}
return nil
}
// lazyinit sets up all required file descriptors and starts 1+consumersCount
// goroutines. The producer goroutine blocks until file-system notifications
// occur. Then, all events are read from the system buffer and sent to the consumer
// goroutines, which construct valid notify events. This method uses the
// double-checked locking optimization.
func (i *inotify) lazyinit() error {
if atomic.LoadInt32(&i.fd) == invalidDescriptor {
i.Lock()
defer i.Unlock()
if atomic.LoadInt32(&i.fd) == invalidDescriptor {
fd, err := syscall.InotifyInit()
if err != nil {
return err
}
i.fd = int32(fd)
if err = i.epollinit(); err != nil {
_, _ = i.epollclose(), syscall.Close(int(fd)) // Ignore errors.
i.fd = invalidDescriptor
return err
}
esch := make(chan []*event)
go i.loop(esch)
i.wg.Add(consumersCount)
for n := 0; n < consumersCount; n++ {
go i.send(esch)
}
}
}
return nil
}
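The double-checked locking used above generalizes to any lazily created resource; a stand-alone sketch of the same pattern (all names hypothetical):
package main

import (
    "sync"
    "sync/atomic"
)

type lazyFD struct {
    mu sync.Mutex
    fd int32 // -1 means "not initialized yet"
}

// get takes the cheap atomic fast path when the descriptor already exists and
// re-checks under the lock so that only one goroutine pays the setup cost.
func (l *lazyFD) get() int32 {
    if fd := atomic.LoadInt32(&l.fd); fd != -1 {
        return fd
    }
    l.mu.Lock()
    defer l.mu.Unlock()
    if atomic.LoadInt32(&l.fd) == -1 {
        atomic.StoreInt32(&l.fd, 42) // the expensive initialization would go here
    }
    return atomic.LoadInt32(&l.fd)
}

func main() { _ = (&lazyFD{fd: -1}).get() }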
// epollinit opens an epoll file descriptor and creates a pipe which will be
// used to wake up the epoll_wait(2) function. Then, file descriptor associated
// with inotify event queue and the read end of the pipe are added to epoll set.
// Note that `fd` member must be set before this function is called.
func (i *inotify) epollinit() (err error) {
if i.epfd, err = syscall.EpollCreate1(0); err != nil {
return
}
if err = syscall.Pipe(i.pipefd); err != nil {
return
}
i.epes = []syscall.EpollEvent{
{Events: syscall.EPOLLIN, Fd: i.fd},
{Events: syscall.EPOLLIN, Fd: int32(i.pipefd[0])},
}
if err = syscall.EpollCtl(i.epfd, syscall.EPOLL_CTL_ADD, int(i.fd), &i.epes[0]); err != nil {
return
}
return syscall.EpollCtl(i.epfd, syscall.EPOLL_CTL_ADD, i.pipefd[0], &i.epes[1])
}
// epollclose closes the file descriptor created by the call to epoll_create(2)
// and two file descriptors opened by pipe(2) function.
func (i *inotify) epollclose() (err error) {
if i.epfd != invalidDescriptor {
if err = syscall.Close(i.epfd); err == nil {
i.epfd = invalidDescriptor
}
}
for n, fd := range i.pipefd {
if fd != invalidDescriptor {
switch e := syscall.Close(fd); {
case e != nil && err == nil:
err = e
case e == nil:
i.pipefd[n] = invalidDescriptor
}
}
}
return
}
// loop blocks until either the inotify or the pipe file descriptor is ready for I/O.
// All read operations triggered by filesystem notifications are forwarded to
// one of the event consumers. If the pipe fd becomes ready, the loop function closes
// all file descriptors opened by the lazyinit method and returns afterwards.
func (i *inotify) loop(esch chan<- []*event) {
epes := make([]syscall.EpollEvent, 1)
fd := atomic.LoadInt32(&i.fd)
for {
switch _, err := syscall.EpollWait(i.epfd, epes, -1); err {
case nil:
switch epes[0].Fd {
case fd:
esch <- i.read()
epes[0].Fd = 0
case int32(i.pipefd[0]):
i.Lock()
defer i.Unlock()
if err = syscall.Close(int(fd)); err != nil && err != syscall.EINTR {
panic("notify: close(2) error " + err.Error())
}
atomic.StoreInt32(&i.fd, invalidDescriptor)
if err = i.epollclose(); err != nil && err != syscall.EINTR {
panic("notify: epollclose error " + err.Error())
}
close(esch)
return
}
case syscall.EINTR:
continue
default: // We should never reach this line.
panic("notify: epoll_wait(2) error " + err.Error())
}
}
}
// read reads events from an inotify file descriptor. It does not handle errors
// returned from read(2) function since they are not critical to watcher logic.
func (i *inotify) read() (es []*event) {
n, err := syscall.Read(int(i.fd), i.buffer[:])
if err != nil || n < syscall.SizeofInotifyEvent {
return
}
var sys *syscall.InotifyEvent
nmin := n - syscall.SizeofInotifyEvent
for pos, path := 0, ""; pos <= nmin; {
sys = (*syscall.InotifyEvent)(unsafe.Pointer(&i.buffer[pos]))
pos += syscall.SizeofInotifyEvent
if path = ""; sys.Len > 0 {
endpos := pos + int(sys.Len)
path = string(bytes.TrimRight(i.buffer[pos:endpos], "\x00"))
pos = endpos
}
es = append(es, &event{
sys: syscall.InotifyEvent{
Wd: sys.Wd,
Mask: sys.Mask,
Cookie: sys.Cookie,
},
path: path,
})
}
return
}
// send is a consumer function which sends events to the event dispatcher channel.
// It is run in a separate goroutine in order not to block the loop method when
// possibly expensive write operations are performed on the inotify map.
func (i *inotify) send(esch <-chan []*event) {
for es := range esch {
for _, e := range i.transform(es) {
if e != nil {
i.c <- e
}
}
}
i.wg.Done()
}
// transform prepares events read from the inotify file descriptor for sending to the
// user. It removes invalid events and those which are no longer present in the
// inotify map. This method may also split one raw event into two different ones
// when a system-dependent result is required.
func (i *inotify) transform(es []*event) []*event {
var multi []*event
i.RLock()
for idx, e := range es {
if e.sys.Mask&(syscall.IN_IGNORED|syscall.IN_Q_OVERFLOW) != 0 {
es[idx] = nil
continue
}
wd, ok := i.m[e.sys.Wd]
if !ok || e.sys.Mask&encode(Event(wd.mask)) == 0 {
es[idx] = nil
continue
}
if e.path == "" {
e.path = wd.path
} else {
e.path = filepath.Join(wd.path, e.path)
}
multi = append(multi, decode(Event(wd.mask), e))
if e.event == 0 {
es[idx] = nil
}
}
i.RUnlock()
es = append(es, multi...)
return es
}
// encode converts notify's system-independent events to a valid inotify mask
// which can be passed to the inotify_add_watch(2) function.
func encode(e Event) uint32 {
if e&Create != 0 {
e = (e ^ Create) | InCreate | InMovedTo
}
if e&Remove != 0 {
e = (e ^ Remove) | InDelete | InDeleteSelf
}
if e&Write != 0 {
e = (e ^ Write) | InModify
}
if e&Rename != 0 {
e = (e ^ Rename) | InMovedFrom | InMoveSelf
}
return uint32(e)
}
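For instance (a package-internal sketch), requesting Create|Write expands into the corresponding inotify bits:
// encode(Create|Write) == uint32(InCreate | InMovedTo | InModify)
var createWriteMask = encode(Create | Write)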
// decode uses internally stored mask to distinguish whether system-independent
// or system-dependent event is requested. The first one is created by modifying
// `e` argument. decode method sets e.event value to 0 when an event should be
// skipped. System-dependent event is set as the function's return value which
// can be nil when the event should not be passed on.
func decode(mask Event, e *event) (syse *event) {
if sysmask := uint32(mask) & e.sys.Mask; sysmask != 0 {
syse = &event{sys: syscall.InotifyEvent{
Wd: e.sys.Wd,
Mask: e.sys.Mask,
Cookie: e.sys.Cookie,
}, event: Event(sysmask), path: e.path}
}
imask := encode(mask)
switch {
case mask&Create != 0 && imask&uint32(InCreate|InMovedTo)&e.sys.Mask != 0:
e.event = Create
case mask&Remove != 0 && imask&uint32(InDelete|InDeleteSelf)&e.sys.Mask != 0:
e.event = Remove
case mask&Write != 0 && imask&uint32(InModify)&e.sys.Mask != 0:
e.event = Write
case mask&Rename != 0 && imask&uint32(InMovedFrom|InMoveSelf)&e.sys.Mask != 0:
e.event = Rename
default:
e.event = 0
}
return
}
// Unwatch implements notify.watcher interface. It looks for the watch descriptor
// related to the registered path and, if found, calls the inotify_rm_watch(2) function.
// This method is allowed to return an EINVAL error when concurrently requested to
// delete an identical path.
func (i *inotify) Unwatch(path string) (err error) {
iwd := int32(invalidDescriptor)
i.RLock()
for iwdkey, wd := range i.m {
if wd.path == path {
iwd = iwdkey
break
}
}
i.RUnlock()
if iwd == invalidDescriptor {
return errors.New("notify: path " + path + " is already watched")
}
fd := atomic.LoadInt32(&i.fd)
if _, err = syscall.InotifyRmWatch(int(fd), uint32(iwd)); err != nil {
return
}
i.Lock()
delete(i.m, iwd)
i.Unlock()
return nil
}
// Close implements notify.watcher interface. It removes all existing watch
// descriptors and wakes up producer goroutine by sending data to the write end
// of the pipe. The function waits for a signal from producer which means that
// all operations on current monitoring instance are done.
func (i *inotify) Close() (err error) {
i.Lock()
if fd := atomic.LoadInt32(&i.fd); fd == invalidDescriptor {
i.Unlock()
return nil
}
for iwd := range i.m {
if _, e := syscall.InotifyRmWatch(int(i.fd), uint32(iwd)); e != nil && err == nil {
err = e
}
delete(i.m, iwd)
}
switch _, errwrite := syscall.Write(i.pipefd[1], []byte{0x00}); {
case errwrite != nil && err == nil:
err = errwrite
fallthrough
case errwrite != nil:
i.Unlock()
default:
i.Unlock()
i.wg.Wait()
}
return
}

View File

@ -0,0 +1,192 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build darwin,kqueue dragonfly freebsd netbsd openbsd
package notify
import (
"fmt"
"os"
"syscall"
)
// newTrigger returns an implementation of trigger.
func newTrigger(pthLkp map[string]*watched) trigger {
return &kq{
pthLkp: pthLkp,
idLkp: make(map[int]*watched),
}
}
// kq is a structure implementing trigger for kqueue.
type kq struct {
// fd is a kqueue file descriptor
fd int
// pipefds are file descriptors used to stop `Kevent` call.
pipefds [2]int
// idLkp is a data structure mapping file descriptors to data about the
// files/directories they watch.
idLkp map[int]*watched
// pthLkp is a structure mapping monitored files/dirs to data about them,
// shared with the parent trg structure
pthLkp map[string]*watched
}
// watched is a data structure representing watched file/directory.
type watched struct {
// p is a path to watched file/directory.
p string
// fd is a file descriptor for watched file/directory.
fd int
// fi provides information about watched file/dir.
fi os.FileInfo
// eDir represents events watched directly.
eDir Event
// eNonDir represents events watched indirectly.
eNonDir Event
}
// Stop implements trigger.
func (k *kq) Stop() (err error) {
// trigger event used to interrupt Kevent call.
_, err = syscall.Write(k.pipefds[1], []byte{0x00})
return
}
// Close implements trigger.
func (k *kq) Close() error {
return syscall.Close(k.fd)
}
// NewWatched implements trigger.
func (*kq) NewWatched(p string, fi os.FileInfo) (*watched, error) {
fd, err := syscall.Open(p, syscall.O_NONBLOCK|syscall.O_RDONLY, 0)
if err != nil {
return nil, err
}
return &watched{fd: fd, p: p, fi: fi}, nil
}
// Record implements trigger.
func (k *kq) Record(w *watched) {
k.idLkp[w.fd], k.pthLkp[w.p] = w, w
}
// Del implements trigger.
func (k *kq) Del(w *watched) {
syscall.Close(w.fd)
delete(k.idLkp, w.fd)
delete(k.pthLkp, w.p)
}
func inter2kq(n interface{}) syscall.Kevent_t {
kq, ok := n.(syscall.Kevent_t)
if !ok {
panic(fmt.Sprintf("kqueue: type should be Kevent_t, %T instead", n))
}
return kq
}
// Init implements trigger.
func (k *kq) Init() (err error) {
if k.fd, err = syscall.Kqueue(); err != nil {
return
}
// Create a pipe used to stop the `Kevent` call: its read end is registered for
// watching, and writing to the other end interrupts the call.
if err = syscall.Pipe(k.pipefds[:]); err != nil {
return nonil(err, k.Close())
}
var kevn [1]syscall.Kevent_t
syscall.SetKevent(&kevn[0], k.pipefds[0], syscall.EVFILT_READ, syscall.EV_ADD)
if _, err = syscall.Kevent(k.fd, kevn[:], nil, nil); err != nil {
return nonil(err, k.Close())
}
return
}
// Unwatch implements trigger.
func (k *kq) Unwatch(w *watched) (err error) {
var kevn [1]syscall.Kevent_t
syscall.SetKevent(&kevn[0], w.fd, syscall.EVFILT_VNODE, syscall.EV_DELETE)
_, err = syscall.Kevent(k.fd, kevn[:], nil, nil)
return
}
// Watch implements trigger.
func (k *kq) Watch(fi os.FileInfo, w *watched, e int64) (err error) {
var kevn [1]syscall.Kevent_t
syscall.SetKevent(&kevn[0], w.fd, syscall.EVFILT_VNODE,
syscall.EV_ADD|syscall.EV_CLEAR)
kevn[0].Fflags = uint32(e)
_, err = syscall.Kevent(k.fd, kevn[:], nil, nil)
return
}
// Wait implements trigger.
func (k *kq) Wait() (interface{}, error) {
var (
kevn [1]syscall.Kevent_t
err error
)
kevn[0] = syscall.Kevent_t{}
_, err = syscall.Kevent(k.fd, nil, kevn[:], nil)
return kevn[0], err
}
// Watched implements trigger.
func (k *kq) Watched(n interface{}) (*watched, int64, error) {
kevn, ok := n.(syscall.Kevent_t)
if !ok {
panic(fmt.Sprintf("kq: type should be syscall.Kevent_t, %T instead", kevn))
}
if _, ok = k.idLkp[int(kevn.Ident)]; !ok {
return nil, 0, errNotWatched
}
return k.idLkp[int(kevn.Ident)], int64(kevn.Fflags), nil
}
// IsStop implements trigger.
func (k *kq) IsStop(n interface{}, err error) bool {
return int(inter2kq(n).Ident) == k.pipefds[0]
}
func init() {
encode = func(e Event) (o int64) {
// Create event is not supported by kqueue. Instead a NoteWrite event will
// be registered. If this event is reported on a dir which is to be
// monitored for Create, the dir will be rescanned and Create events will
// be generated and returned for new files. In the case of files, if a
// NoteRename event is reported but was not requested, it will be ignored.
o = int64(e &^ Create)
if e&Write != 0 {
o = (o &^ int64(Write)) | int64(NoteWrite)
}
if e&Rename != 0 {
o = (o &^ int64(Rename)) | int64(NoteRename)
}
if e&Remove != 0 {
o = (o &^ int64(Remove)) | int64(NoteDelete)
}
return
}
nat2not = map[Event]Event{
NoteWrite: Write,
NoteRename: Rename,
NoteDelete: Remove,
NoteExtend: Event(0),
NoteAttrib: Event(0),
NoteRevoke: Event(0),
NoteLink: Event(0),
}
not2nat = map[Event]Event{
Write: NoteWrite,
Rename: NoteRename,
Remove: NoteDelete,
}
}

View File

@ -0,0 +1,574 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build windows
package notify
import (
"errors"
"runtime"
"sync"
"sync/atomic"
"syscall"
"unsafe"
)
// readBufferSize defines the size of an array in which read statuses are stored.
// The buffer has to be DWORD-aligned and, if notify is used for monitoring a
// directory over the network, its size must not be greater than 64KB. Each
// watched directory uses its own buffer for storing events.
const readBufferSize = 4096
// Since all operations which go through the Windows completion routine are done
// asynchronously, the filter may be set to one of the constants below. They were defined
// in order to distinguish whether the current folder should be re-registered in the
// ReadDirectoryChangesW function or whether some control operations need to be executed.
const (
stateRewatch uint32 = 1 << (28 + iota)
stateUnwatch
stateCPClose
)
// The filter used in the current implementation is split into four segments:
//  - bits 0-11 store ReadDirectoryChangesW filters,
//  - bits 12-19 store File notify actions,
//  - bits 20-27 store notify specific events and flags,
//  - bits 28-31 store states which are used in the loop's FSM.
// The constants below are used as masks to retrieve only specific filter parts.
const (
onlyNotifyChanges uint32 = 0x00000FFF
onlyNGlobalEvents uint32 = 0x0FF00000
onlyMachineStates uint32 = 0xF0000000
)
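A small sketch (package-internal) of how the three named masks above pull a packed filter apart:
// splitFilter is illustrative only; the real code applies these masks inline.
func splitFilter(filter uint32) (changes, events, states uint32) {
    changes = filter & onlyNotifyChanges // ReadDirectoryChangesW filters (bits 0-11)
    events = filter & onlyNGlobalEvents  // notify specific events and flags (bits 20-27)
    states = filter & onlyMachineStates  // loop FSM states (bits 28-31)
    return
}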
// grip represents a single watched directory. It stores the data required by
// the ReadDirectoryChangesW function. Only the filter, recursive, and handle members
// may be modified by the watcher implementation. The rest of them have to remain
// constant since they are used by the Windows completion routine. This indicates that
// a grip can be removed only when all operations on the file handle are finished.
type grip struct {
handle syscall.Handle
filter uint32
recursive bool
pathw []uint16
buffer [readBufferSize]byte
parent *watched
ovlapped *overlappedEx
}
// overlappedEx stores information used in asynchronous input and output.
// Additionally, overlappedEx contains a pointer to the 'grip' item, which is used
// to recover the structure in which the overlappedEx object was created.
type overlappedEx struct {
syscall.Overlapped
parent *grip
}
// newGrip creates a new file handle that can be used in overlapped operations.
// Then, the handle is associated with I/O completion port 'cph' and its value
// is stored in newly created 'grip' object.
func newGrip(cph syscall.Handle, parent *watched, filter uint32) (*grip, error) {
g := &grip{
handle: syscall.InvalidHandle,
filter: filter,
recursive: parent.recursive,
pathw: parent.pathw,
parent: parent,
ovlapped: &overlappedEx{},
}
if err := g.register(cph); err != nil {
return nil, err
}
g.ovlapped.parent = g
return g, nil
}
// NOTE : Thread safe
func (g *grip) register(cph syscall.Handle) (err error) {
if g.handle, err = syscall.CreateFile(
&g.pathw[0],
syscall.FILE_LIST_DIRECTORY,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
nil,
syscall.OPEN_EXISTING,
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED,
0,
); err != nil {
return
}
if _, err = syscall.CreateIoCompletionPort(g.handle, cph, 0, 0); err != nil {
syscall.CloseHandle(g.handle)
return
}
return g.readDirChanges()
}
// readDirChanges tells the system to store file change information in grip's
// buffer. Directory changes that occur between calls to this function are added
// to the buffer and then returned with the next call.
func (g *grip) readDirChanges() error {
return syscall.ReadDirectoryChanges(
g.handle,
&g.buffer[0],
uint32(unsafe.Sizeof(g.buffer)),
g.recursive,
encode(g.filter),
nil,
(*syscall.Overlapped)(unsafe.Pointer(g.ovlapped)),
0,
)
}
// encode transforms a generic filter, which contains platform independent and
// implementation specific bit fields, to a value that can be used as the NotifyFilter
// parameter in the ReadDirectoryChangesW function.
func encode(filter uint32) uint32 {
e := Event(filter & (onlyNGlobalEvents | onlyNotifyChanges))
if e&dirmarker != 0 {
return uint32(FileNotifyChangeDirName)
}
if e&Create != 0 {
e = (e ^ Create) | FileNotifyChangeFileName
}
if e&Remove != 0 {
e = (e ^ Remove) | FileNotifyChangeFileName
}
if e&Write != 0 {
e = (e ^ Write) | FileNotifyChangeAttributes | FileNotifyChangeSize |
FileNotifyChangeCreation | FileNotifyChangeSecurity
}
if e&Rename != 0 {
e = (e ^ Rename) | FileNotifyChangeFileName
}
return uint32(e)
}
// watched is made in order to check whether an action comes from a directory or a
// file. This approach requires two file handles per single monitored folder. The
// second grip handles actions which include creating or deleting a directory. If
// these operations are not monitored, only the first grip is created.
type watched struct {
filter uint32
recursive bool
count uint8
pathw []uint16
digrip [2]*grip
}
// newWatched creates a new watched instance. It splits the filter variable into
// two parts. The first part is responsible for watching all events which can be
// created for a file in watched directory structure and the second one watches
// only directory Create/Remove actions. If all operations succeed, the Create
// message is sent to I/O completion port queue for further processing.
func newWatched(cph syscall.Handle, filter uint32, recursive bool,
path string) (wd *watched, err error) {
wd = &watched{
filter: filter,
recursive: recursive,
}
if wd.pathw, err = syscall.UTF16FromString(path); err != nil {
return
}
if err = wd.recreate(cph); err != nil {
return
}
return wd, nil
}
// TODO : doc
func (wd *watched) recreate(cph syscall.Handle) (err error) {
filefilter := wd.filter &^ uint32(FileNotifyChangeDirName)
if err = wd.updateGrip(0, cph, filefilter == 0, filefilter); err != nil {
return
}
dirfilter := wd.filter & uint32(FileNotifyChangeDirName|Create|Remove)
if err = wd.updateGrip(1, cph, dirfilter == 0, wd.filter|uint32(dirmarker)); err != nil {
return
}
wd.filter &^= onlyMachineStates
return
}
// TODO : doc
func (wd *watched) updateGrip(idx int, cph syscall.Handle, reset bool,
newflag uint32) (err error) {
if reset {
wd.digrip[idx] = nil
} else {
if wd.digrip[idx] == nil {
if wd.digrip[idx], err = newGrip(cph, wd, newflag); err != nil {
wd.closeHandle()
return
}
} else {
wd.digrip[idx].filter = newflag
wd.digrip[idx].recursive = wd.recursive
if err = wd.digrip[idx].register(cph); err != nil {
wd.closeHandle()
return
}
}
wd.count++
}
return
}
// closeHandle closes the handles that are stored in the digrip array. The function
// always tries to close all of the handles before it exits, even when there are
// errors returned from the operating system kernel.
func (wd *watched) closeHandle() (err error) {
for _, g := range wd.digrip {
if g != nil && g.handle != syscall.InvalidHandle {
switch suberr := syscall.CloseHandle(g.handle); {
case suberr == nil:
g.handle = syscall.InvalidHandle
case err == nil:
err = suberr
}
}
}
return
}
// readdcw implements the watcher interface. It stores a set of watched directories.
// All operations which remove watched objects from map `m` must be performed in the
// loop goroutine since these structures are used internally by the operating system.
type readdcw struct {
sync.Mutex
m map[string]*watched
cph syscall.Handle
start bool
wg sync.WaitGroup
c chan<- EventInfo
}
// newWatcher creates a new non-recursive watcher backed by ReadDirectoryChangesW.
func newWatcher(c chan<- EventInfo) watcher {
r := &readdcw{
m: make(map[string]*watched),
cph: syscall.InvalidHandle,
c: c,
}
runtime.SetFinalizer(r, func(r *readdcw) {
if r.cph != syscall.InvalidHandle {
syscall.CloseHandle(r.cph)
}
})
return r
}
// Watch implements notify.Watcher interface.
func (r *readdcw) Watch(path string, event Event) error {
return r.watch(path, event, false)
}
// RecursiveWatch implements notify.RecursiveWatcher interface.
func (r *readdcw) RecursiveWatch(path string, event Event) error {
return r.watch(path, event, true)
}
// watch inserts a directory into the group of watched folders. If the watched folder
// already exists, the function tries to rewatch it with new filters (NOT VALID). Moreover,
// watch starts the main event loop goroutine when called for the first time.
func (r *readdcw) watch(path string, event Event, recursive bool) (err error) {
if event&^(All|fileNotifyChangeAll) != 0 {
return errors.New("notify: unknown event")
}
r.Lock()
wd, ok := r.m[path]
r.Unlock()
if !ok {
if err = r.lazyinit(); err != nil {
return
}
r.Lock()
if wd, ok = r.m[path]; ok {
r.Unlock()
return
}
if wd, err = newWatched(r.cph, uint32(event), recursive, path); err != nil {
r.Unlock()
return
}
r.m[path] = wd
r.Unlock()
}
return nil
}
// lazyinit creates an I/O completion port and starts the main event processing
// loop. This method uses Double-Checked Locking optimization.
func (r *readdcw) lazyinit() (err error) {
invalid := uintptr(syscall.InvalidHandle)
if atomic.LoadUintptr((*uintptr)(&r.cph)) == invalid {
r.Lock()
defer r.Unlock()
if atomic.LoadUintptr((*uintptr)(&r.cph)) == invalid {
cph := syscall.InvalidHandle
if cph, err = syscall.CreateIoCompletionPort(cph, 0, 0, 0); err != nil {
return
}
r.cph, r.start = cph, true
go r.loop()
}
}
return
}
// TODO(pknap) : doc
func (r *readdcw) loop() {
var n, key uint32
var overlapped *syscall.Overlapped
for {
err := syscall.GetQueuedCompletionStatus(r.cph, &n, &key, &overlapped, syscall.INFINITE)
if key == stateCPClose {
r.Lock()
handle := r.cph
r.cph = syscall.InvalidHandle
r.Unlock()
syscall.CloseHandle(handle)
r.wg.Done()
return
}
if overlapped == nil {
// TODO: check key == rewatch delete or 0(panic)
continue
}
overEx := (*overlappedEx)(unsafe.Pointer(overlapped))
if n == 0 {
r.loopstate(overEx)
} else {
r.loopevent(n, overEx)
if err = overEx.parent.readDirChanges(); err != nil {
// TODO: error handling
}
}
}
}
// TODO(pknap) : doc
func (r *readdcw) loopstate(overEx *overlappedEx) {
filter := atomic.LoadUint32(&overEx.parent.parent.filter)
if filter&onlyMachineStates == 0 {
return
}
if overEx.parent.parent.count--; overEx.parent.parent.count == 0 {
switch filter & onlyMachineStates {
case stateRewatch:
r.Lock()
overEx.parent.parent.recreate(r.cph)
r.Unlock()
case stateUnwatch:
r.Lock()
delete(r.m, syscall.UTF16ToString(overEx.parent.pathw))
r.Unlock()
case stateCPClose:
default:
panic(`notify: windows loopstate logic error`)
}
}
}
// TODO(pknap) : doc
func (r *readdcw) loopevent(n uint32, overEx *overlappedEx) {
events := []*event{}
var currOffset uint32
for {
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&overEx.parent.buffer[currOffset]))
name := syscall.UTF16ToString((*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))[:raw.FileNameLength>>1])
events = append(events, &event{
pathw: overEx.parent.pathw,
filter: overEx.parent.filter,
action: raw.Action,
name: name,
})
if raw.NextEntryOffset == 0 {
break
}
if currOffset += raw.NextEntryOffset; currOffset >= n {
break
}
}
r.send(events)
}
// TODO(pknap) : doc
func (r *readdcw) send(es []*event) {
for _, e := range es {
var syse Event
if e.e, syse = decode(e.filter, e.action); e.e == 0 && syse == 0 {
continue
}
switch {
case e.action == syscall.FILE_ACTION_MODIFIED:
e.ftype = fTypeUnknown
case e.filter&uint32(dirmarker) != 0:
e.ftype = fTypeDirectory
default:
e.ftype = fTypeFile
}
switch {
case e.e == 0:
e.e = syse
case syse != 0:
r.c <- &event{
pathw: e.pathw,
name: e.name,
ftype: e.ftype,
action: e.action,
filter: e.filter,
e: syse,
}
}
r.c <- e
}
}
// Rewatch implements notify.Rewatcher interface.
func (r *readdcw) Rewatch(path string, oldevent, newevent Event) error {
return r.rewatch(path, uint32(oldevent), uint32(newevent), false)
}
// RecursiveRewatch implements notify.RecursiveRewatcher interface.
func (r *readdcw) RecursiveRewatch(oldpath, newpath string, oldevent,
newevent Event) error {
if oldpath != newpath {
if err := r.unwatch(oldpath); err != nil {
return err
}
return r.watch(newpath, newevent, true)
}
return r.rewatch(newpath, uint32(oldevent), uint32(newevent), true)
}
// TODO : (pknap) doc.
func (r *readdcw) rewatch(path string, oldevent, newevent uint32, recursive bool) (err error) {
if Event(newevent)&^(All|fileNotifyChangeAll) != 0 {
return errors.New("notify: unknown event")
}
var wd *watched
r.Lock()
if wd, err = r.nonStateWatched(path); err != nil {
r.Unlock()
return
}
if wd.filter&(onlyNotifyChanges|onlyNGlobalEvents) != oldevent {
panic(`notify: windows re-watcher logic error`)
}
wd.filter = stateRewatch | newevent
wd.recursive, recursive = recursive, wd.recursive
if err = wd.closeHandle(); err != nil {
wd.filter = oldevent
wd.recursive = recursive
r.Unlock()
return
}
r.Unlock()
return
}
// TODO : pknap
func (r *readdcw) nonStateWatched(path string) (wd *watched, err error) {
wd, ok := r.m[path]
if !ok || wd == nil {
err = errors.New(`notify: ` + path + ` path is unwatched`)
return
}
if filter := atomic.LoadUint32(&wd.filter); filter&onlyMachineStates != 0 {
err = errors.New(`notify: another re/unwatching operation in progress`)
return
}
return
}
// Unwatch implements notify.Watcher interface.
func (r *readdcw) Unwatch(path string) error {
return r.unwatch(path)
}
// RecursiveUnwatch implements notify.RecursiveWatcher interface.
func (r *readdcw) RecursiveUnwatch(path string) error {
return r.unwatch(path)
}
// TODO : pknap
func (r *readdcw) unwatch(path string) (err error) {
var wd *watched
r.Lock()
if wd, err = r.nonStateWatched(path); err != nil {
r.Unlock()
return
}
wd.filter |= stateUnwatch
if err = wd.closeHandle(); err != nil {
wd.filter &^= stateUnwatch
r.Unlock()
return
}
r.Unlock()
return
}
// Close resets the whole watcher object, closes all existing file descriptors,
// and sends stateCPClose state as completion key to the main watcher's loop.
func (r *readdcw) Close() (err error) {
r.Lock()
if !r.start {
r.Unlock()
return nil
}
for _, wd := range r.m {
wd.filter &^= onlyMachineStates
wd.filter |= stateCPClose
if e := wd.closeHandle(); e != nil && err == nil {
err = e
}
}
r.start = false
r.Unlock()
r.wg.Add(1)
if e := syscall.PostQueuedCompletionStatus(r.cph, 0, stateCPClose, nil); e != nil && err == nil {
return e
}
r.wg.Wait()
return
}
// decode creates a notify event from both the non-raw filter and the action which was
// returned from the completion routine. The function may return Event(0) in the case
// when the filter was replaced by a new value which does not contain fields that are
// valid for the passed action.
func decode(filter, action uint32) (Event, Event) {
switch action {
case syscall.FILE_ACTION_ADDED:
return gensys(filter, Create, FileActionAdded)
case syscall.FILE_ACTION_REMOVED:
return gensys(filter, Remove, FileActionRemoved)
case syscall.FILE_ACTION_MODIFIED:
return gensys(filter, Write, FileActionModified)
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
return gensys(filter, Rename, FileActionRenamedOldName)
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
return gensys(filter, Rename, FileActionRenamedNewName)
}
panic(`notify: cannot decode internal mask`)
}
// gensys decides whether the Windows action, system-independent event or both
// of them should be returned. Since the grip's filter may be atomically changed
// during watcher lifetime, it is possible that neither Windows nor notify masks
// are watched by the user when this function is called.
func gensys(filter uint32, ge, se Event) (gene, syse Event) {
isdir := filter&uint32(dirmarker) != 0
if isdir && filter&uint32(FileNotifyChangeDirName) != 0 ||
!isdir && filter&uint32(FileNotifyChangeFileName) != 0 ||
filter&uint32(fileNotifyChangeModified) != 0 {
syse = se
}
if filter&uint32(ge) != 0 {
gene = ge
}
return
}

View File

@ -0,0 +1,23 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build !darwin,!linux,!freebsd,!dragonfly,!netbsd,!openbsd,!windows
// +build !kqueue,!solaris
package notify
import "errors"
type stub struct{ error }
// newWatcher stub.
func newWatcher(chan<- EventInfo) watcher {
return stub{errors.New("notify: not implemented")}
}
// Following methods implement notify.watcher interface.
func (s stub) Watch(string, Event) error { return s }
func (s stub) Rewatch(string, Event, Event) error { return s }
func (s stub) Unwatch(string) (err error) { return s }
func (s stub) Close() error { return s }
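The embedded-error trick above works for any interface whose methods all return error; a stand-alone sketch with a hypothetical interface:
package main

import (
    "errors"
    "fmt"
)

type store interface {
    Put(key string) error
    Del(key string) error
}

// unsupported satisfies store by returning its embedded error from every method.
type unsupported struct{ error }

func (u unsupported) Put(string) error { return u }
func (u unsupported) Del(string) error { return u }

func main() {
    var s store = unsupported{errors.New("store: not implemented")}
    fmt.Println(s.Put("key")) // store: not implemented
}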

View File

@ -0,0 +1,438 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build darwin,kqueue dragonfly freebsd netbsd openbsd solaris
// watcher_trigger is used for FEN and kqueue, which behave similarly:
// only files and directories can be watched directly, not files inside
// directories. As a result, Create events have to be generated by the
// implementation: after a Write event is returned for a watched directory,
// the directory is rescanned, a Create event is returned for each new file,
// and these files are automatically added to the watchlist. When a watched
// directory is removed, the native system returns events for all its files,
// but the corresponding Rename events also need to be generated.
// The native system therefore works as a kind of trigger for rescanning,
// while carrying additional data about the directory in which the changes
// occurred. For files, detailed data is returned.
// Usage of watcher_trigger requires:
// - a trigger implementation,
// - an encode func,
// - the not2nat and nat2not maps.
// The required manual operations on the filesystem can lead to loss of precision.
package notify
import (
"os"
"path/filepath"
"strings"
"sync"
"syscall"
)
// trigger is to be implemented by each platform-specific backend, such as FEN or kqueue.
type trigger interface {
// Close closes watcher's main native file descriptor.
Close() error
// Stop waiting for new events.
Stop() error
// Create new instance of watched.
NewWatched(string, os.FileInfo) (*watched, error)
// Record internally new *watched instance.
Record(*watched)
// Del removes internal copy of *watched instance.
Del(*watched)
// Watched returns *watched instance and native events for native type.
Watched(interface{}) (*watched, int64, error)
// Init initializes native watcher call.
Init() error
// Watch starts watching provided file/dir.
Watch(os.FileInfo, *watched, int64) error
// Unwatch stops watching provided file/dir.
Unwatch(*watched) error
// Wait for new events.
Wait() (interface{}, error)
// IsStop checks if Wait finished because of request watcher's stop.
IsStop(n interface{}, err error) bool
}
// encode converts an Event to its native representation. It is to be provided
// by the platform-specific implementation.
var encode func(Event) int64
var (
// nat2not matches native events to notify's ones. To be initialized by
// platform dependent implementation.
nat2not map[Event]Event
// not2nat matches notify's events to native ones. To be initialized by
// platform dependent implementation.
not2nat map[Event]Event
)
// trg is a main structure implementing watcher.
type trg struct {
sync.Mutex
// s is a channel used to stop monitoring.
s chan struct{}
// c is a channel used to pass events further.
c chan<- EventInfo
// pthLkp maps file names to the data describing how the files/directories
// they represent are being watched.
pthLkp map[string]*watched
// t is a platform dependent implementation of trigger.
t trigger
}
// newWatcher returns new watcher's implementation.
func newWatcher(c chan<- EventInfo) watcher {
t := &trg{
s: make(chan struct{}, 1),
pthLkp: make(map[string]*watched, 0),
c: c,
}
t.t = newTrigger(t.pthLkp)
if err := t.t.Init(); err != nil {
panic(err)
}
go t.monitor()
return t
}
// Close implements watcher.
func (t *trg) Close() (err error) {
t.Lock()
if err = t.t.Stop(); err != nil {
t.Unlock()
return
}
<-t.s
var e error
for _, w := range t.pthLkp {
if e = t.unwatch(w.p, w.fi); e != nil {
dbgprintf("trg: unwatch %q failed: %q\n", w.p, e)
err = nonil(err, e)
}
}
if e = t.t.Close(); e != nil {
dbgprintf("trg: closing native watch failed: %q\n", e)
err = nonil(err, e)
}
t.Unlock()
return
}
// send reported events one by one through chan.
func (t *trg) send(evn []event) {
for i := range evn {
t.c <- &evn[i]
}
}
// singlewatch starts to watch given p file/directory.
func (t *trg) singlewatch(p string, e Event, direct mode, fi os.FileInfo) (err error) {
w, ok := t.pthLkp[p]
if !ok {
if w, err = t.t.NewWatched(p, fi); err != nil {
return
}
}
switch direct {
case dir:
w.eDir |= e
case ndir:
w.eNonDir |= e
case both:
w.eDir |= e
w.eNonDir |= e
}
var ee int64
// The native Write event is added in order to catch Create events (a Write
// event on a directory triggers its rescan).
if e&Create != 0 && fi.IsDir() {
ee = int64(not2nat[Write])
}
if err = t.t.Watch(fi, w, encode(w.eDir|w.eNonDir)|ee); err != nil {
return
}
if !ok {
t.t.Record(w)
return nil
}
return errAlreadyWatched
}
// decode converts an event received from the native system into its
// notify.Event representation, taking into account the requested events (w).
func decode(o int64, w Event) (e Event) {
for f, n := range nat2not {
if o&int64(f) != 0 {
if w&f != 0 {
e |= f
}
if w&n != 0 {
e |= n
}
}
}
return
}
func (t *trg) watch(p string, e Event, fi os.FileInfo) error {
if err := t.singlewatch(p, e, dir, fi); err != nil {
if err != errAlreadyWatched {
return nil
}
}
if fi.IsDir() {
err := t.walk(p, func(fi os.FileInfo) (err error) {
if err = t.singlewatch(filepath.Join(p, fi.Name()), e, ndir,
fi); err != nil {
if err != errAlreadyWatched {
return
}
}
return nil
})
if err != nil {
return err
}
}
return nil
}
// walk runs the fn func on each file/dir from the p directory.
func (t *trg) walk(p string, fn func(os.FileInfo) error) error {
fp, err := os.Open(p)
if err != nil {
return err
}
ls, err := fp.Readdir(0)
fp.Close()
if err != nil {
return err
}
for i := range ls {
if err := fn(ls[i]); err != nil {
return err
}
}
return nil
}
func (t *trg) unwatch(p string, fi os.FileInfo) error {
if fi.IsDir() {
err := t.walk(p, func(fi os.FileInfo) error {
err := t.singleunwatch(filepath.Join(p, fi.Name()), ndir)
if err != errNotWatched {
return err
}
return nil
})
if err != nil {
return err
}
}
return t.singleunwatch(p, dir)
}
// Watch implements Watcher interface.
func (t *trg) Watch(p string, e Event) error {
fi, err := os.Stat(p)
if err != nil {
return err
}
t.Lock()
err = t.watch(p, e, fi)
t.Unlock()
return err
}
// Unwatch implements Watcher interface.
func (t *trg) Unwatch(p string) error {
fi, err := os.Stat(p)
if err != nil {
return err
}
t.Lock()
err = t.unwatch(p, fi)
t.Unlock()
return err
}
// Rewatch implements Watcher interface.
//
// TODO(rjeczalik): This is a naive hack. Rewrite might help.
func (t *trg) Rewatch(p string, _, e Event) error {
fi, err := os.Stat(p)
if err != nil {
return err
}
t.Lock()
if err = t.unwatch(p, fi); err == nil {
// TODO(rjeczalik): If watch fails then we leave trigger in inconsistent
// state. Handle? Panic? Native version of rewatch?
err = t.watch(p, e, fi)
}
t.Unlock()
return nil
}
func (*trg) file(w *watched, n interface{}, e Event) (evn []event) {
evn = append(evn, event{w.p, e, w.fi.IsDir(), n})
return
}
func (t *trg) dir(w *watched, n interface{}, e, ge Event) (evn []event) {
// If it's a dir and the event is a delete, we have to send it and continue,
// because other processing relies on opening the (in this case no longer
// existing) dir. Events for the contents of this dir are reported by the
// native impl. However, Rename events must be generated for all monitored
// files inside the moved directory, because the native impl does not report
// them independently for each file descriptor being moved as a result of a
// move action on the parent directory.
if (ge & (not2nat[Rename] | not2nat[Remove])) != 0 {
// Write is reported also for Remove on directory. Because of that
// we have to filter it out explicitly.
evn = append(evn, event{w.p, e & ^Write & ^not2nat[Write], true, n})
if ge&not2nat[Rename] != 0 {
for p := range t.pthLkp {
if strings.HasPrefix(p, w.p+string(os.PathSeparator)) {
if err := t.singleunwatch(p, both); err != nil && err != errNotWatched &&
!os.IsNotExist(err) {
dbgprintf("trg: failed stop watching moved file (%q): %q\n",
p, err)
}
if (w.eDir|w.eNonDir)&(not2nat[Rename]|Rename) != 0 {
evn = append(evn, event{
p, (w.eDir | w.eNonDir) & e &^ Write &^ not2nat[Write],
w.fi.IsDir(), nil,
})
}
}
}
}
t.t.Del(w)
return
}
if (ge & not2nat[Write]) != 0 {
switch err := t.walk(w.p, func(fi os.FileInfo) error {
p := filepath.Join(w.p, fi.Name())
switch err := t.singlewatch(p, w.eDir, ndir, fi); {
case os.IsNotExist(err) && ((w.eDir & Remove) != 0):
evn = append(evn, event{p, Remove, fi.IsDir(), n})
case err == errAlreadyWatched:
case err != nil:
dbgprintf("trg: watching %q failed: %q", p, err)
case (w.eDir & Create) != 0:
evn = append(evn, event{p, Create, fi.IsDir(), n})
default:
}
return nil
}); {
case os.IsNotExist(err):
return
case err != nil:
dbgprintf("trg: dir processing failed: %q", err)
default:
}
}
return
}
type mode uint
const (
dir mode = iota
ndir
both
)
// singleunwatch stops watching the p file/directory.
func (t *trg) singleunwatch(p string, direct mode) error {
w, ok := t.pthLkp[p]
if !ok {
return errNotWatched
}
switch direct {
case dir:
w.eDir = 0
case ndir:
w.eNonDir = 0
case both:
w.eDir, w.eNonDir = 0, 0
}
if err := t.t.Unwatch(w); err != nil {
return err
}
if w.eNonDir|w.eDir != 0 {
mod := dir
if w.eNonDir == 0 {
mod = ndir
}
if err := t.singlewatch(p, w.eNonDir|w.eDir, mod,
w.fi); err != nil && err != errAlreadyWatched {
return err
}
} else {
t.t.Del(w)
}
return nil
}
func (t *trg) monitor() {
var (
n interface{}
err error
)
for {
switch n, err = t.t.Wait(); {
case err == syscall.EINTR:
case t.t.IsStop(n, err):
t.s <- struct{}{}
return
case err != nil:
dbgprintf("trg: failed to read events: %q\n", err)
default:
t.send(t.process(n))
}
}
}
// process handles an event returned by the native wait call (e.g. port_get for FEN).
func (t *trg) process(n interface{}) (evn []event) {
t.Lock()
w, ge, err := t.t.Watched(n)
if err != nil {
t.Unlock()
dbgprintf("trg: %v event lookup failed: %q", Event(ge), err)
return
}
e := decode(ge, w.eDir|w.eNonDir)
if ge&int64(not2nat[Remove]|not2nat[Rename]) == 0 {
switch fi, err := os.Stat(w.p); {
case err != nil:
default:
if err = t.t.Watch(fi, w, (encode(w.eDir | w.eNonDir))); err != nil {
dbgprintf("trg: %q is no longer watched: %q", w.p, err)
t.t.Del(w)
}
}
}
if e == Event(0) {
t.Unlock()
return
}
if w.fi.IsDir() {
evn = append(evn, t.dir(w, n, e, Event(ge))...)
} else {
evn = append(evn, t.file(w, n, e)...)
}
if Event(ge)&(not2nat[Remove]|not2nat[Rename]) != 0 {
t.t.Del(w)
}
t.Unlock()
return
}


@ -0,0 +1,103 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
package notify
// eventDiff describes a change to an event set - eventDiff[0] is the old state,
// while eventDiff[1] is the new state. If the event set has not changed
// (old == new), functions typically return the none value.
type eventDiff [2]Event
func (diff eventDiff) Event() Event {
return diff[1] &^ diff[0]
}
// Watchpoint
//
// The nil key holds total event set - logical sum for all registered events.
// It speeds up computing EventDiff for Add method.
//
// The rec key holds the event set for watchpoints created by RecursiveWatch
// for a Watcher implementation which is not natively recursive.
type watchpoint map[chan<- EventInfo]Event
// None is an empty event diff, think null object.
var none eventDiff
// rec is just a placeholder
var rec = func() (ch chan<- EventInfo) {
ch = make(chan<- EventInfo)
close(ch)
return
}()
func (wp watchpoint) dryAdd(ch chan<- EventInfo, e Event) eventDiff {
if e &^= internal; wp[ch]&e == e {
return none
}
total := wp[ch] &^ internal
return eventDiff{total, total | e}
}
// Add assumes neither c nor e are nil or zero values.
func (wp watchpoint) Add(c chan<- EventInfo, e Event) (diff eventDiff) {
wp[c] |= e
diff[0] = wp[nil]
diff[1] = diff[0] | e
wp[nil] = diff[1] &^ omit
// Strip diff from internal events.
diff[0] &^= internal
diff[1] &^= internal
if diff[0] == diff[1] {
return none
}
return
}
func (wp watchpoint) Del(c chan<- EventInfo, e Event) (diff eventDiff) {
wp[c] &^= e
if wp[c] == 0 {
delete(wp, c)
}
diff[0] = wp[nil]
delete(wp, nil)
if len(wp) != 0 {
// Recalculate total event set.
for _, e := range wp {
diff[1] |= e
}
wp[nil] = diff[1] &^ omit
}
// Strip diff from internal events.
diff[0] &^= internal
diff[1] &^= internal
if diff[0] == diff[1] {
return none
}
return
}
func (wp watchpoint) Dispatch(ei EventInfo, extra Event) {
e := eventmask(ei, extra)
if !matches(wp[nil], e) {
return
}
for ch, eset := range wp {
if ch != nil && matches(eset, e) {
select {
case ch <- ei:
default: // Drop event if receiver is too slow
dbgprintf("dropped %s on %q: receiver too slow", ei.Event(), ei.Path())
}
}
}
}
func (wp watchpoint) Total() Event {
return wp[nil] &^ internal
}
func (wp watchpoint) IsRecursive() bool {
return wp[nil]&recursive != 0
}
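// Illustrative sketch (assumed in-package usage, not part of the API): the nil
// key of a watchpoint holds the union of events requested by all registered
// channels, and Add reports how that union changed.
//
//	wp := make(watchpoint)
//	ch := make(chan EventInfo, 1)
//	d := wp.Add(ch, Create|Write) // d == eventDiff{0, Create | Write}
//	d = wp.Add(ch, Create)        // d == none; Create was already in the set
//	_ = wp.Total()                // Create | Write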


@ -0,0 +1,23 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build !windows
package notify
// eventmask uses ei to create a new event which contains internal flags used by
// notify package logic.
func eventmask(ei EventInfo, extra Event) Event {
return ei.Event() | extra
}
// matches reports a match only when:
//
// - for user events, when event is present in the given set
// - for internal events, when additionally both event and set have the omit bit set
//
// Internal events must not be sent to user channels and vice versa.
func matches(set, event Event) bool {
return (set&omit)^(event&omit) == 0 && set&event == event
}
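// Worked examples, following directly from the formula above:
//
//	matches(Create|Remove, Create) == true  // Create is in the set, no omit bits involved
//	matches(Create, Create|Remove) == false // Remove is not in the set
//	matches(Create|omit, Create)   == false // omit bits differ (internal vs user event)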


@ -0,0 +1,38 @@
// Copyright (c) 2014-2015 The Notify Authors. All rights reserved.
// Use of this source code is governed by the MIT license that can be
// found in the LICENSE file.
// +build windows
package notify
// eventmask uses ei to create a new event which contains internal flags used by
// notify package logic. If one of FileAction* masks is detected, this function
// adds the corresponding FileNotifyChange* values. This allows non-registered
// FileAction* events to be passed on.
func eventmask(ei EventInfo, extra Event) (e Event) {
if e = ei.Event() | extra; e&fileActionAll != 0 {
if ev, ok := ei.(*event); ok {
switch ev.ftype {
case fTypeFile:
e |= FileNotifyChangeFileName
case fTypeDirectory:
e |= FileNotifyChangeDirName
case fTypeUnknown:
e |= fileNotifyChangeModified
}
return e &^ fileActionAll
}
}
return
}
// matches reports a match only when:
//
// - for user events, when event is present in the given set
// - for internal events, when additionally both event and set have omit bit set
//
// Internal events must not be sent to user channels and vice versa.
func matches(set, event Event) bool {
return (set&omit)^(event&omit) == 0 && (set&event == event || set&fileNotifyChangeModified&event != 0)
}


@ -17,10 +17,12 @@
package bind
import (
"crypto/ecdsa"
"errors"
"io"
"io/ioutil"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@ -33,23 +35,24 @@ func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) {
if err != nil {
return nil, err
}
key, err := crypto.DecryptKey(json, passphrase)
key, err := accounts.DecryptKey(json, passphrase)
if err != nil {
return nil, err
}
return NewKeyedTransactor(key), nil
return NewKeyedTransactor(key.PrivateKey), nil
}
// NewKeyedTransactor is a utility method to easily create a transaction signer
// from a plain go-ethereum crypto key.
func NewKeyedTransactor(key *crypto.Key) *TransactOpts {
// from a single private key.
func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts {
keyAddr := crypto.PubkeyToAddress(key.PublicKey)
return &TransactOpts{
From: key.Address,
From: keyAddr,
Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) {
if address != key.Address {
if address != keyAddr {
return nil, errors.New("not authorized to sign this account")
}
signature, err := crypto.Sign(tx.SigHash().Bytes(), key.PrivateKey)
signature, err := crypto.Sign(tx.SigHash().Bytes(), key)
if err != nil {
return nil, err
}
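// A minimal usage sketch of the updated API (mirrors the updated tests that
// follow; error handling elided for brevity):
//
//	key, _ := crypto.GenerateKey() // *ecdsa.PrivateKey
//	auth := bind.NewKeyedTransactor(key)
//	// auth.From == crypto.PubkeyToAddress(key.PublicKey)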


@ -167,11 +167,9 @@ var bindTests = []struct {
`[{"constant":true,"inputs":[],"name":"transactString","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":true,"inputs":[],"name":"deployString","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":false,"inputs":[{"name":"str","type":"string"}],"name":"transact","outputs":[],"type":"function"},{"inputs":[{"name":"str","type":"string"}],"type":"constructor"}]`,
`
// Generate a new random account and a funded simulator
key := crypto.NewKey(rand.Reader)
sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: key.Address, Balance: big.NewInt(10000000000)})
// Convert the tester key to an authorized transactor for ease of use
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: auth.From, Balance: big.NewInt(10000000000)})
// Deploy an interaction tester contract and call a transaction on it
_, _, interactor, err := DeployInteractor(auth, sim, "Deploy string")
@ -210,11 +208,9 @@ var bindTests = []struct {
`[{"constant":true,"inputs":[],"name":"tuple","outputs":[{"name":"a","type":"string"},{"name":"b","type":"int256"},{"name":"c","type":"bytes32"}],"type":"function"}]`,
`
// Generate a new random account and a funded simulator
key := crypto.NewKey(rand.Reader)
sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: key.Address, Balance: big.NewInt(10000000000)})
// Convert the tester key to an authorized transactor for ease of use
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: auth.From, Balance: big.NewInt(10000000000)})
// Deploy a tuple tester contract and execute a structured call on it
_, _, tupler, err := DeployTupler(auth, sim)
@ -252,11 +248,9 @@ var bindTests = []struct {
`[{"constant":true,"inputs":[{"name":"input","type":"address[]"}],"name":"echoAddresses","outputs":[{"name":"output","type":"address[]"}],"type":"function"},{"constant":true,"inputs":[{"name":"input","type":"uint24[23]"}],"name":"echoFancyInts","outputs":[{"name":"output","type":"uint24[23]"}],"type":"function"},{"constant":true,"inputs":[{"name":"input","type":"int256[]"}],"name":"echoInts","outputs":[{"name":"output","type":"int256[]"}],"type":"function"},{"constant":true,"inputs":[{"name":"input","type":"bool[]"}],"name":"echoBools","outputs":[{"name":"output","type":"bool[]"}],"type":"function"}]`,
`
// Generate a new random account and a funded simulator
key := crypto.NewKey(rand.Reader)
sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: key.Address, Balance: big.NewInt(10000000000)})
// Convert the tester key to an authorized transactor for ease of use
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: auth.From, Balance: big.NewInt(10000000000)})
// Deploy a slice tester contract and execute a n array call on it
_, _, slicer, err := DeploySlicer(auth, sim)
@ -265,10 +259,10 @@ var bindTests = []struct {
}
sim.Commit()
if out, err := slicer.EchoAddresses(nil, []common.Address{key.Address, common.Address{}}); err != nil {
if out, err := slicer.EchoAddresses(nil, []common.Address{auth.From, common.Address{}}); err != nil {
t.Fatalf("Failed to call slice echoer: %v", err)
} else if !reflect.DeepEqual(out, []common.Address{key.Address, common.Address{}}) {
t.Fatalf("Slice return mismatch: have %v, want %v", out, []common.Address{key.Address, common.Address{}})
} else if !reflect.DeepEqual(out, []common.Address{auth.From, common.Address{}}) {
t.Fatalf("Slice return mismatch: have %v, want %v", out, []common.Address{auth.From, common.Address{}})
}
`,
},
@ -288,11 +282,9 @@ var bindTests = []struct {
`[{"constant":true,"inputs":[],"name":"caller","outputs":[{"name":"","type":"address"}],"type":"function"}]`,
`
// Generate a new random account and a funded simulator
key := crypto.NewKey(rand.Reader)
sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: key.Address, Balance: big.NewInt(10000000000)})
// Convert the tester key to an authorized transactor for ease of use
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAccount{Address: auth.From, Balance: big.NewInt(10000000000)})
// Deploy a default method invoker contract and execute its default method
_, _, defaulter, err := DeployDefaulter(auth, sim)
@ -306,8 +298,8 @@ var bindTests = []struct {
if caller, err := defaulter.Caller(nil); err != nil {
t.Fatalf("Failed to call address retriever: %v", err)
} else if (caller != key.Address) {
t.Fatalf("Address mismatch: have %v, want %v", caller, key.Address)
} else if (caller != auth.From) {
t.Fatalf("Address mismatch: have %v, want %v", caller, auth.From)
}
`,
},


@ -14,20 +14,21 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package implements a private key management facility.
// Package accounts implements encrypted storage of secp256k1 private keys.
//
// This abstracts part of a user's interaction with an account she controls.
// Keys are stored as encrypted JSON files according to the Web3 Secret Storage specification.
// See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition for more information.
package accounts
// Currently this is pretty much a passthrough to the KeyStore interface,
// and accounts persistence is derived from stored keys' addresses
import (
"crypto/ecdsa"
crand "crypto/rand"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"time"
@ -36,109 +37,182 @@ import (
)
var (
ErrLocked = errors.New("account is locked")
ErrNoKeys = errors.New("no keys in store")
ErrLocked = errors.New("account is locked")
ErrNoMatch = errors.New("no key for given address or file")
ErrDecrypt = errors.New("could not decrypt key with given passphrase")
)
// Account represents a stored key.
// When used as an argument, it selects a unique key file to act on.
type Account struct {
Address common.Address
Address common.Address // Ethereum account address derived from the key
// File contains the key file name.
// When Account is used as an argument to select a key, File can be left blank to
// select just by address or set to the basename or absolute path of a file in the key
// directory. Accounts returned by Manager will always contain an absolute path.
File string
}
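// For example, the same key can be selected either by address or by file name
// (values taken from the test fixtures; assuming the address is unique):
//
//	Account{Address: common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8")}
//	Account{File: "UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8"}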
func (acc *Account) MarshalJSON() ([]byte, error) {
return []byte(`"` + acc.Address.Hex() + `"`), nil
}
func (acc *Account) UnmarshalJSON(raw []byte) error {
return json.Unmarshal(raw, &acc.Address)
}
// Manager manages a key storage directory on disk.
type Manager struct {
keyStore crypto.KeyStore
cache *addrCache
keyStore keyStore
mu sync.RWMutex
unlocked map[common.Address]*unlocked
mutex sync.RWMutex
}
type unlocked struct {
*crypto.Key
*Key
abort chan struct{}
}
func NewManager(keyStore crypto.KeyStore) *Manager {
return &Manager{
keyStore: keyStore,
unlocked: make(map[common.Address]*unlocked),
// NewManager creates a manager for the given directory.
func NewManager(keydir string, scryptN, scryptP int) *Manager {
keydir, _ = filepath.Abs(keydir)
am := &Manager{keyStore: &keyStorePassphrase{keydir, scryptN, scryptP}}
am.init(keydir)
return am
}
// NewPlaintextManager creates a manager for the given directory.
// Deprecated: Use NewManager.
func NewPlaintextManager(keydir string) *Manager {
keydir, _ = filepath.Abs(keydir)
am := &Manager{keyStore: &keyStorePlain{keydir}}
am.init(keydir)
return am
}
func (am *Manager) init(keydir string) {
am.unlocked = make(map[common.Address]*unlocked)
am.cache = newAddrCache(keydir)
// TODO: In order for this finalizer to work, there must be no references
// to am. addrCache doesn't keep a reference but unlocked keys do,
// so the finalizer will not trigger until all timed unlocks have expired.
runtime.SetFinalizer(am, func(m *Manager) {
m.cache.close()
})
}
// HasAddress reports whether a key with the given address is present.
func (am *Manager) HasAddress(addr common.Address) bool {
return am.cache.hasAddress(addr)
}
// Accounts returns all key files present in the directory.
func (am *Manager) Accounts() []Account {
return am.cache.accounts()
}
// DeleteAccount deletes the key matched by account if the passphrase is correct.
// If a contains no filename, the address must match a unique key.
func (am *Manager) DeleteAccount(a Account, passphrase string) error {
// Decrypting the key isn't really necessary, but we do
// it anyway to check the password and zero out the key
// immediately afterwards.
a, key, err := am.getDecryptedKey(a, passphrase)
if key != nil {
zeroKey(key.PrivateKey)
}
}
func (am *Manager) HasAccount(addr common.Address) bool {
accounts, _ := am.Accounts()
for _, acct := range accounts {
if acct.Address == addr {
return true
}
if err != nil {
return err
}
return false
// The order is crucial here. The key is dropped from the
// cache after the file is gone so that a reload happening in
// between won't insert it into the cache again.
err = os.Remove(a.File)
if err == nil {
am.cache.delete(a)
}
return err
}
func (am *Manager) DeleteAccount(address common.Address, auth string) error {
return am.keyStore.DeleteKey(address, auth)
}
func (am *Manager) Sign(a Account, toSign []byte) (signature []byte, err error) {
am.mutex.RLock()
defer am.mutex.RUnlock()
unlockedKey, found := am.unlocked[a.Address]
// Sign signs hash with an unlocked private key matching the given address.
func (am *Manager) Sign(addr common.Address, hash []byte) (signature []byte, err error) {
am.mu.RLock()
defer am.mu.RUnlock()
unlockedKey, found := am.unlocked[addr]
if !found {
return nil, ErrLocked
}
signature, err = crypto.Sign(toSign, unlockedKey.PrivateKey)
return signature, err
return crypto.Sign(hash, unlockedKey.PrivateKey)
}
// Unlock unlocks the given account indefinitely.
func (am *Manager) Unlock(addr common.Address, keyAuth string) error {
return am.TimedUnlock(addr, keyAuth, 0)
func (am *Manager) Unlock(a Account, keyAuth string) error {
return am.TimedUnlock(a, keyAuth, 0)
}
// Lock removes the private key with the given address from memory.
func (am *Manager) Lock(addr common.Address) error {
am.mutex.Lock()
am.mu.Lock()
if unl, found := am.unlocked[addr]; found {
am.mutex.Unlock()
am.mu.Unlock()
am.expire(addr, unl, time.Duration(0)*time.Nanosecond)
} else {
am.mutex.Unlock()
am.mu.Unlock()
}
return nil
}
// TimedUnlock unlocks the account with the given address. The account
// TimedUnlock unlocks the given account with the passphrase. The account
// stays unlocked for the duration of timeout. A timeout of 0 unlocks the account
// until the program exits.
// until the program exits. The account must match a unique key file.
//
// If the account is already unlocked, TimedUnlock extends or shortens
// the active unlock timeout.
func (am *Manager) TimedUnlock(addr common.Address, keyAuth string, timeout time.Duration) error {
key, err := am.keyStore.GetKey(addr, keyAuth)
// If the account address is already unlocked for a duration, TimedUnlock extends or
// shortens the active unlock timeout. If the address was previously unlocked
// indefinitely the timeout is not altered.
func (am *Manager) TimedUnlock(a Account, passphrase string, timeout time.Duration) error {
a, key, err := am.getDecryptedKey(a, passphrase)
if err != nil {
return err
}
var u *unlocked
am.mutex.Lock()
defer am.mutex.Unlock()
var found bool
u, found = am.unlocked[addr]
am.mu.Lock()
defer am.mu.Unlock()
u, found := am.unlocked[a.Address]
if found {
// terminate dropLater for this key to avoid unexpected drops.
if u.abort != nil {
if u.abort == nil {
// The address was unlocked indefinitely, so unlocking
// it with a timeout would be confusing.
zeroKey(key.PrivateKey)
return nil
} else {
// Terminate the expire goroutine and replace it below.
close(u.abort)
}
}
if timeout > 0 {
u = &unlocked{Key: key, abort: make(chan struct{})}
go am.expire(addr, u, timeout)
go am.expire(a.Address, u, timeout)
} else {
u = &unlocked{Key: key}
}
am.unlocked[addr] = u
am.unlocked[a.Address] = u
return nil
}
func (am *Manager) getDecryptedKey(a Account, auth string) (Account, *Key, error) {
am.cache.maybeReload()
am.cache.mu.Lock()
a, err := am.cache.find(a)
am.cache.mu.Unlock()
if err != nil {
return a, nil, err
}
key, err := am.keyStore.GetKey(a.Address, a.File, auth)
return a, key, err
}
func (am *Manager) expire(addr common.Address, u *unlocked, timeout time.Duration) {
t := time.NewTimer(timeout)
defer t.Stop()
@ -146,7 +220,7 @@ func (am *Manager) expire(addr common.Address, u *unlocked, timeout time.Duratio
case <-u.abort:
// just quit
case <-t.C:
am.mutex.Lock()
am.mu.Lock()
// only drop if it's still the same key instance that dropLater
// was launched with. we can check that using pointer equality
// because the map stores a new pointer every time the key is
@ -155,46 +229,91 @@ func (am *Manager) expire(addr common.Address, u *unlocked, timeout time.Duratio
zeroKey(u.PrivateKey)
delete(am.unlocked, addr)
}
am.mutex.Unlock()
am.mu.Unlock()
}
}
func (am *Manager) NewAccount(auth string) (Account, error) {
key, err := am.keyStore.GenerateNewKey(crand.Reader, auth)
// NewAccount generates a new key and stores it into the key directory,
// encrypting it with the passphrase.
func (am *Manager) NewAccount(passphrase string) (Account, error) {
_, account, err := storeNewKey(am.keyStore, crand.Reader, passphrase)
if err != nil {
return Account{}, err
}
return Account{Address: key.Address}, nil
// Add the account to the cache immediately rather
// than waiting for file system notifications to pick it up.
am.cache.add(account)
return account, nil
}
func (am *Manager) AddressByIndex(index int) (addr string, err error) {
var addrs []common.Address
addrs, err = am.keyStore.GetKeyAddresses()
// AccountByIndex returns the ith account.
func (am *Manager) AccountByIndex(i int) (Account, error) {
accounts := am.Accounts()
if i < 0 || i >= len(accounts) {
return Account{}, fmt.Errorf("account index %d out of range [0, %d]", i, len(accounts)-1)
}
return accounts[i], nil
}
// Export exports the key of the given account as JSON, encrypted with newPassphrase.
func (am *Manager) Export(a Account, passphrase, newPassphrase string) (keyJSON []byte, err error) {
_, key, err := am.getDecryptedKey(a, passphrase)
if err != nil {
return
}
if index < 0 || index >= len(addrs) {
err = fmt.Errorf("index out of range: %d (should be 0-%d)", index, len(addrs)-1)
} else {
addr = addrs[index].Hex()
}
return
}
func (am *Manager) Accounts() ([]Account, error) {
addresses, err := am.keyStore.GetKeyAddresses()
if os.IsNotExist(err) {
return nil, ErrNoKeys
} else if err != nil {
return nil, err
}
accounts := make([]Account, len(addresses))
for i, addr := range addresses {
accounts[i] = Account{
Address: addr,
}
var N, P int
if store, ok := am.keyStore.(*keyStorePassphrase); ok {
N, P = store.scryptN, store.scryptP
} else {
N, P = StandardScryptN, StandardScryptP
}
return accounts, err
return EncryptKey(key, newPassphrase, N, P)
}
// Import stores the given encrypted JSON key into the key directory.
func (am *Manager) Import(keyJSON []byte, passphrase, newPassphrase string) (Account, error) {
key, err := DecryptKey(keyJSON, passphrase)
if key != nil && key.PrivateKey != nil {
defer zeroKey(key.PrivateKey)
}
if err != nil {
return Account{}, err
}
return am.importKey(key, newPassphrase)
}
// ImportECDSA stores the given key into the key directory, encrypting it with the passphrase.
func (am *Manager) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (Account, error) {
return am.importKey(newKeyFromECDSA(priv), passphrase)
}
func (am *Manager) importKey(key *Key, passphrase string) (Account, error) {
a := Account{Address: key.Address, File: am.keyStore.JoinPath(keyFileName(key.Address))}
if err := am.keyStore.StoreKey(a.File, key, passphrase); err != nil {
return Account{}, err
}
am.cache.add(a)
return a, nil
}
// Update changes the passphrase of an existing account.
func (am *Manager) Update(a Account, passphrase, newPassphrase string) error {
a, key, err := am.getDecryptedKey(a, passphrase)
if err != nil {
return err
}
return am.keyStore.StoreKey(a.File, key, newPassphrase)
}
// ImportPreSaleKey decrypts the given Ethereum presale wallet and stores
// a key file in the key directory. The key file is encrypted with the same passphrase.
func (am *Manager) ImportPreSaleKey(keyJSON []byte, passphrase string) (Account, error) {
a, _, err := importPreSaleKey(am.keyStore, keyJSON, passphrase)
if err != nil {
return a, err
}
am.cache.add(a)
return a, nil
}
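// Minimal end-to-end sketch of the Manager API (illustrative only; the key
// directory, passphrases and hash value are made up):
//
//	am := NewManager("/tmp/keystore", StandardScryptN, StandardScryptP)
//	a, _ := am.NewAccount("secret")              // create and store a new key file
//	_ = am.TimedUnlock(a, "secret", time.Minute) // unlock for one minute
//	sig, _ := am.Sign(a.Address, hash)           // hash is a 32-byte digest
//	_ = am.Update(a, "secret", "new-secret")     // change the passphrase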
// zeroKey zeroes a private key in memory.
@ -204,47 +323,3 @@ func zeroKey(k *ecdsa.PrivateKey) {
b[i] = 0
}
}
// USE WITH CAUTION = this will save an unencrypted private key on disk
// no cli or js interface
func (am *Manager) Export(path string, addr common.Address, keyAuth string) error {
key, err := am.keyStore.GetKey(addr, keyAuth)
if err != nil {
return err
}
return crypto.SaveECDSA(path, key.PrivateKey)
}
func (am *Manager) Import(path string, keyAuth string) (Account, error) {
privateKeyECDSA, err := crypto.LoadECDSA(path)
if err != nil {
return Account{}, err
}
key := crypto.NewKeyFromECDSA(privateKeyECDSA)
if err = am.keyStore.StoreKey(key, keyAuth); err != nil {
return Account{}, err
}
return Account{Address: key.Address}, nil
}
func (am *Manager) Update(addr common.Address, authFrom, authTo string) (err error) {
var key *crypto.Key
key, err = am.keyStore.GetKey(addr, authFrom)
if err == nil {
err = am.keyStore.StoreKey(key, authTo)
if err == nil {
am.keyStore.Cleanup(addr)
}
}
return
}
func (am *Manager) ImportPreSaleKey(keyJSON []byte, password string) (acc Account, err error) {
var key *crypto.Key
key, err = crypto.ImportPreSaleKey(am.keyStore, keyJSON, password)
if err != nil {
return
}
return Account{Address: key.Address}, nil
}


@ -19,95 +19,132 @@ package accounts
import (
"io/ioutil"
"os"
"runtime"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/common"
)
var testSigData = make([]byte, 32)
func TestSign(t *testing.T) {
dir, ks := tmpKeyStore(t, crypto.NewKeyStorePlain)
func TestManager(t *testing.T) {
dir, am := tmpManager(t, true)
defer os.RemoveAll(dir)
a, err := am.NewAccount("foo")
if err != nil {
t.Fatal(err)
}
if !strings.HasPrefix(a.File, dir) {
t.Errorf("account file %s doesn't have dir prefix", a.File)
}
stat, err := os.Stat(a.File)
if err != nil {
t.Fatalf("account file %s doesn't exist (%v)", a.File, err)
}
if runtime.GOOS != "windows" && stat.Mode() != 0600 {
t.Fatalf("account file has wrong mode: got %o, want %o", stat.Mode(), 0600)
}
if !am.HasAddress(a.Address) {
t.Errorf("HasAccount(%x) should've returned true", a.Address)
}
if err := am.Update(a, "foo", "bar"); err != nil {
t.Errorf("Update error: %v", err)
}
if err := am.DeleteAccount(a, "bar"); err != nil {
t.Errorf("DeleteAccount error: %v", err)
}
if common.FileExist(a.File) {
t.Errorf("account file %s should be gone after DeleteAccount", a.File)
}
if am.HasAddress(a.Address) {
t.Errorf("HasAccount(%x) should've returned true after DeleteAccount", a.Address)
}
}
func TestSign(t *testing.T) {
dir, am := tmpManager(t, true)
defer os.RemoveAll(dir)
am := NewManager(ks)
pass := "" // not used but required by API
a1, err := am.NewAccount(pass)
am.Unlock(a1.Address, "")
_, err = am.Sign(a1, testSigData)
if err != nil {
t.Fatal(err)
}
if err := am.Unlock(a1, ""); err != nil {
t.Fatal(err)
}
if _, err := am.Sign(a1.Address, testSigData); err != nil {
t.Fatal(err)
}
}
func TestTimedUnlock(t *testing.T) {
dir, ks := tmpKeyStore(t, crypto.NewKeyStorePlain)
dir, am := tmpManager(t, true)
defer os.RemoveAll(dir)
am := NewManager(ks)
pass := "foo"
a1, err := am.NewAccount(pass)
// Signing without passphrase fails because account is locked
_, err = am.Sign(a1, testSigData)
_, err = am.Sign(a1.Address, testSigData)
if err != ErrLocked {
t.Fatal("Signing should've failed with ErrLocked before unlocking, got ", err)
}
// Signing with passphrase works
if err = am.TimedUnlock(a1.Address, pass, 100*time.Millisecond); err != nil {
if err = am.TimedUnlock(a1, pass, 100*time.Millisecond); err != nil {
t.Fatal(err)
}
// Signing without passphrase works because account is temp unlocked
_, err = am.Sign(a1, testSigData)
_, err = am.Sign(a1.Address, testSigData)
if err != nil {
t.Fatal("Signing shouldn't return an error after unlocking, got ", err)
}
// Signing fails again after automatic locking
time.Sleep(350 * time.Millisecond)
_, err = am.Sign(a1, testSigData)
time.Sleep(250 * time.Millisecond)
_, err = am.Sign(a1.Address, testSigData)
if err != ErrLocked {
t.Fatal("Signing should've failed with ErrLocked timeout expired, got ", err)
}
}
func TestOverrideUnlock(t *testing.T) {
dir, ks := tmpKeyStore(t, crypto.NewKeyStorePlain)
dir, am := tmpManager(t, false)
defer os.RemoveAll(dir)
am := NewManager(ks)
pass := "foo"
a1, err := am.NewAccount(pass)
// Unlock indefinitely
if err = am.Unlock(a1.Address, pass); err != nil {
// Unlock indefinitely.
if err = am.TimedUnlock(a1, pass, 5*time.Minute); err != nil {
t.Fatal(err)
}
// Signing without passphrase works because account is temp unlocked
_, err = am.Sign(a1, testSigData)
_, err = am.Sign(a1.Address, testSigData)
if err != nil {
t.Fatal("Signing shouldn't return an error after unlocking, got ", err)
}
// reset unlock to a shorter period, invalidates the previous unlock
if err = am.TimedUnlock(a1.Address, pass, 100*time.Millisecond); err != nil {
if err = am.TimedUnlock(a1, pass, 100*time.Millisecond); err != nil {
t.Fatal(err)
}
// Signing without passphrase still works because account is temp unlocked
_, err = am.Sign(a1, testSigData)
_, err = am.Sign(a1.Address, testSigData)
if err != nil {
t.Fatal("Signing shouldn't return an error after unlocking, got ", err)
}
// Signing fails again after automatic locking
time.Sleep(150 * time.Millisecond)
_, err = am.Sign(a1, testSigData)
time.Sleep(250 * time.Millisecond)
_, err = am.Sign(a1.Address, testSigData)
if err != ErrLocked {
t.Fatal("Signing should've failed with ErrLocked timeout expired, got ", err)
}
@ -115,22 +152,21 @@ func TestOverrideUnlock(t *testing.T) {
// This test should fail under -race if signing races the expiration goroutine.
func TestSignRace(t *testing.T) {
dir, ks := tmpKeyStore(t, crypto.NewKeyStorePlain)
dir, am := tmpManager(t, false)
defer os.RemoveAll(dir)
// Create a test account.
am := NewManager(ks)
a1, err := am.NewAccount("")
if err != nil {
t.Fatal("could not create the test account", err)
}
if err := am.TimedUnlock(a1.Address, "", 15*time.Millisecond); err != nil {
t.Fatalf("could not unlock the test account", err)
if err := am.TimedUnlock(a1, "", 15*time.Millisecond); err != nil {
t.Fatal("could not unlock the test account", err)
}
end := time.Now().Add(500 * time.Millisecond)
for time.Now().Before(end) {
if _, err := am.Sign(a1, testSigData); err == ErrLocked {
if _, err := am.Sign(a1.Address, testSigData); err == ErrLocked {
return
} else if err != nil {
t.Errorf("Sign error: %v", err)
@ -141,10 +177,14 @@ func TestSignRace(t *testing.T) {
t.Errorf("Account did not lock within the timeout")
}
func tmpKeyStore(t *testing.T, new func(string) crypto.KeyStore) (string, crypto.KeyStore) {
func tmpManager(t *testing.T, encrypted bool) (string, *Manager) {
d, err := ioutil.TempDir("", "eth-keystore-test")
if err != nil {
t.Fatal(err)
}
new := NewPlaintextManager
if encrypted {
new = func(kd string) *Manager { return NewManager(kd, veryLightScryptN, veryLightScryptP) }
}
return d, new(d)
}

accounts/addrcache.go (new file, 269 lines)

@ -0,0 +1,269 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
)
// Minimum amount of time between cache reloads. This limit applies if the platform does
// not support change notifications. It also applies if the keystore directory does not
// exist yet; in that case the code will attempt to create a watcher at most this often.
const minReloadInterval = 2 * time.Second
type accountsByFile []Account
func (s accountsByFile) Len() int { return len(s) }
func (s accountsByFile) Less(i, j int) bool { return s[i].File < s[j].File }
func (s accountsByFile) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// AmbiguousAddrError is returned when attempting to unlock
// an address for which more than one file exists.
type AmbiguousAddrError struct {
Addr common.Address
Matches []Account
}
func (err *AmbiguousAddrError) Error() string {
files := ""
for i, a := range err.Matches {
files += a.File
if i < len(err.Matches)-1 {
files += ", "
}
}
return fmt.Sprintf("multiple keys match address (%s)", files)
}
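// Illustrative caller-side sketch (assumed, not part of the package): when an
// address matches several key files, retrying with one of the reported
// matches (which carry the File field) resolves the ambiguity.
//
//	_, _, err := am.getDecryptedKey(Account{Address: addr}, pass)
//	if ambErr, ok := err.(*AmbiguousAddrError); ok {
//		_, _, err = am.getDecryptedKey(ambErr.Matches[0], pass)
//	}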
// addrCache is a live index of all accounts in the keystore.
type addrCache struct {
keydir string
watcher *watcher
mu sync.Mutex
all accountsByFile
byAddr map[common.Address][]Account
throttle *time.Timer
}
func newAddrCache(keydir string) *addrCache {
ac := &addrCache{
keydir: keydir,
byAddr: make(map[common.Address][]Account),
}
ac.watcher = newWatcher(ac)
return ac
}
func (ac *addrCache) accounts() []Account {
ac.maybeReload()
ac.mu.Lock()
defer ac.mu.Unlock()
cpy := make([]Account, len(ac.all))
copy(cpy, ac.all)
return cpy
}
func (ac *addrCache) hasAddress(addr common.Address) bool {
ac.maybeReload()
ac.mu.Lock()
defer ac.mu.Unlock()
return len(ac.byAddr[addr]) > 0
}
func (ac *addrCache) add(newAccount Account) {
ac.mu.Lock()
defer ac.mu.Unlock()
i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].File >= newAccount.File })
if i < len(ac.all) && ac.all[i] == newAccount {
return
}
// newAccount is not in the cache.
ac.all = append(ac.all, Account{})
copy(ac.all[i+1:], ac.all[i:])
ac.all[i] = newAccount
ac.byAddr[newAccount.Address] = append(ac.byAddr[newAccount.Address], newAccount)
}
// note: removed needs to be unique here (i.e. both File and Address must be set).
func (ac *addrCache) delete(removed Account) {
ac.mu.Lock()
defer ac.mu.Unlock()
ac.all = removeAccount(ac.all, removed)
if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
delete(ac.byAddr, removed.Address)
} else {
ac.byAddr[removed.Address] = ba
}
}
func removeAccount(slice []Account, elem Account) []Account {
for i := range slice {
if slice[i] == elem {
return append(slice[:i], slice[i+1:]...)
}
}
return slice
}
// find returns the cached account for address if there is a unique match.
// The exact matching rules are explained by the documentation of Account.
// Callers must hold ac.mu.
func (ac *addrCache) find(a Account) (Account, error) {
// Limit search to address candidates if possible.
matches := ac.all
if (a.Address != common.Address{}) {
matches = ac.byAddr[a.Address]
}
if a.File != "" {
// If only the basename is specified, complete the path.
if !strings.ContainsRune(a.File, filepath.Separator) {
a.File = filepath.Join(ac.keydir, a.File)
}
for i := range matches {
if matches[i].File == a.File {
return matches[i], nil
}
}
if (a.Address == common.Address{}) {
return Account{}, ErrNoMatch
}
}
switch len(matches) {
case 1:
return matches[0], nil
case 0:
return Account{}, ErrNoMatch
default:
err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]Account, len(matches))}
copy(err.Matches, matches)
return Account{}, err
}
}
func (ac *addrCache) maybeReload() {
ac.mu.Lock()
defer ac.mu.Unlock()
if ac.watcher.running {
return // A watcher is running and will keep the cache up-to-date.
}
if ac.throttle == nil {
ac.throttle = time.NewTimer(0)
} else {
select {
case <-ac.throttle.C:
default:
return // The cache was reloaded recently.
}
}
ac.watcher.start()
ac.reload()
ac.throttle.Reset(minReloadInterval)
}
func (ac *addrCache) close() {
ac.mu.Lock()
ac.watcher.close()
if ac.throttle != nil {
ac.throttle.Stop()
}
ac.mu.Unlock()
}
// reload caches addresses of existing accounts.
// Callers must hold ac.mu.
func (ac *addrCache) reload() {
accounts, err := ac.scan()
if err != nil && glog.V(logger.Debug) {
glog.Errorf("can't load keys: %v", err)
}
ac.all = accounts
sort.Sort(ac.all)
for k := range ac.byAddr {
delete(ac.byAddr, k)
}
for _, a := range accounts {
ac.byAddr[a.Address] = append(ac.byAddr[a.Address], a)
}
glog.V(logger.Debug).Infof("reloaded keys, cache has %d accounts", len(ac.all))
}
func (ac *addrCache) scan() ([]Account, error) {
files, err := ioutil.ReadDir(ac.keydir)
if err != nil {
return nil, err
}
var (
buf = new(bufio.Reader)
addrs []Account
keyJSON struct {
Address common.Address `json:"address"`
}
)
for _, fi := range files {
path := filepath.Join(ac.keydir, fi.Name())
if skipKeyFile(fi) {
glog.V(logger.Detail).Infof("ignoring file %s", path)
continue
}
fd, err := os.Open(path)
if err != nil {
glog.V(logger.Detail).Infoln(err)
continue
}
buf.Reset(fd)
// Parse the address.
keyJSON.Address = common.Address{}
err = json.NewDecoder(buf).Decode(&keyJSON)
switch {
case err != nil:
glog.V(logger.Debug).Infof("can't decode key %s: %v", path, err)
case (keyJSON.Address == common.Address{}):
glog.V(logger.Debug).Infof("can't decode key %s: missing or zero address", path)
default:
addrs = append(addrs, Account{Address: keyJSON.Address, File: path})
}
fd.Close()
}
return addrs, err
}
func skipKeyFile(fi os.FileInfo) bool {
// Skip editor backups and UNIX-style hidden files.
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
return true
}
// Skip misc special files, directories (yes, symlinks too).
if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
return true
}
return false
}

accounts/addrcache_test.go (new file, 283 lines)

@ -0,0 +1,283 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"fmt"
"math/rand"
"os"
"path/filepath"
"reflect"
"sort"
"testing"
"time"
"github.com/cespare/cp"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
)
var (
cachetestDir, _ = filepath.Abs(filepath.Join("testdata", "keystore"))
cachetestAccounts = []Account{
{
Address: common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"),
File: filepath.Join(cachetestDir, "UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8"),
},
{
Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"),
File: filepath.Join(cachetestDir, "aaa"),
},
{
Address: common.HexToAddress("289d485d9771714cce91d3393d764e1311907acc"),
File: filepath.Join(cachetestDir, "zzz"),
},
}
)
func TestWatchNewFile(t *testing.T) {
t.Parallel()
dir, am := tmpManager(t, false)
defer os.RemoveAll(dir)
// Ensure the watcher is started before adding any files.
am.Accounts()
time.Sleep(200 * time.Millisecond)
// Move in the files.
wantAccounts := make([]Account, len(cachetestAccounts))
for i := range cachetestAccounts {
a := cachetestAccounts[i]
a.File = filepath.Join(dir, filepath.Base(a.File))
wantAccounts[i] = a
if err := cp.CopyFile(a.File, cachetestAccounts[i].File); err != nil {
t.Fatal(err)
}
}
// am should see the accounts.
var list []Account
for d := 200 * time.Millisecond; d < 5*time.Second; d *= 2 {
list = am.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
return
}
time.Sleep(d)
}
t.Errorf("got %s, want %s", spew.Sdump(list), spew.Sdump(wantAccounts))
}
func TestWatchNoDir(t *testing.T) {
t.Parallel()
// Create am but not the directory that it watches.
rand.Seed(time.Now().UnixNano())
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
am := NewManager(dir, LightScryptN, LightScryptP)
list := am.Accounts()
if len(list) > 0 {
t.Error("initial account list not empty:", list)
}
time.Sleep(100 * time.Millisecond)
// Create the directory and copy a key file into it.
os.MkdirAll(dir, 0700)
defer os.RemoveAll(dir)
file := filepath.Join(dir, "aaa")
if err := cp.CopyFile(file, cachetestAccounts[0].File); err != nil {
t.Fatal(err)
}
// am should see the account.
wantAccounts := []Account{cachetestAccounts[0]}
wantAccounts[0].File = file
for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
list = am.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
return
}
time.Sleep(d)
}
t.Errorf("\ngot %v\nwant %v", list, wantAccounts)
}
func TestCacheInitialReload(t *testing.T) {
cache := newAddrCache(cachetestDir)
accounts := cache.accounts()
if !reflect.DeepEqual(accounts, cachetestAccounts) {
t.Fatalf("got initial accounts: %swant %s", spew.Sdump(accounts), spew.Sdump(cachetestAccounts))
}
}
func TestCacheAddDeleteOrder(t *testing.T) {
cache := newAddrCache("testdata/no-such-dir")
cache.watcher.running = true // prevent unexpected reloads
accounts := []Account{
{
Address: common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
File: "-309830980",
},
{
Address: common.HexToAddress("2cac1adea150210703ba75ed097ddfe24e14f213"),
File: "ggg",
},
{
Address: common.HexToAddress("8bda78331c916a08481428e4b07c96d3e916d165"),
File: "zzzzzz-the-very-last-one.keyXXX",
},
{
Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"),
File: "SOMETHING.key",
},
{
Address: common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"),
File: "UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
},
{
Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"),
File: "aaa",
},
{
Address: common.HexToAddress("289d485d9771714cce91d3393d764e1311907acc"),
File: "zzz",
},
}
for _, a := range accounts {
cache.add(a)
}
// Add some of them twice to check that they don't get reinserted.
cache.add(accounts[0])
cache.add(accounts[2])
// Check that the account list is sorted by filename.
wantAccounts := make([]Account, len(accounts))
copy(wantAccounts, accounts)
sort.Sort(accountsByFile(wantAccounts))
list := cache.accounts()
if !reflect.DeepEqual(list, wantAccounts) {
t.Fatalf("got accounts: %s\nwant %s", spew.Sdump(accounts), spew.Sdump(wantAccounts))
}
for _, a := range accounts {
if !cache.hasAddress(a.Address) {
t.Errorf("expected hasAccount(%x) to return true", a.Address)
}
}
if cache.hasAddress(common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e")) {
t.Errorf("expected hasAccount(%x) to return false", common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e"))
}
// Delete a few keys from the cache.
for i := 0; i < len(accounts); i += 2 {
cache.delete(wantAccounts[i])
}
cache.delete(Account{Address: common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e"), File: "something"})
// Check content again after deletion.
wantAccountsAfterDelete := []Account{
wantAccounts[1],
wantAccounts[3],
wantAccounts[5],
}
list = cache.accounts()
if !reflect.DeepEqual(list, wantAccountsAfterDelete) {
t.Fatalf("got accounts after delete: %s\nwant %s", spew.Sdump(list), spew.Sdump(wantAccountsAfterDelete))
}
for _, a := range wantAccountsAfterDelete {
if !cache.hasAddress(a.Address) {
t.Errorf("expected hasAccount(%x) to return true", a.Address)
}
}
if cache.hasAddress(wantAccounts[0].Address) {
t.Errorf("expected hasAccount(%x) to return false", wantAccounts[0].Address)
}
}
func TestCacheFind(t *testing.T) {
dir := filepath.Join("testdata", "dir")
cache := newAddrCache(dir)
cache.watcher.running = true // prevent unexpected reloads
accounts := []Account{
{
Address: common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
File: filepath.Join(dir, "a.key"),
},
{
Address: common.HexToAddress("2cac1adea150210703ba75ed097ddfe24e14f213"),
File: filepath.Join(dir, "b.key"),
},
{
Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"),
File: filepath.Join(dir, "c.key"),
},
{
Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"),
File: filepath.Join(dir, "c2.key"),
},
}
for _, a := range accounts {
cache.add(a)
}
nomatchAccount := Account{
Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"),
File: filepath.Join(dir, "something"),
}
tests := []struct {
Query Account
WantResult Account
WantError error
}{
// by address
{Query: Account{Address: accounts[0].Address}, WantResult: accounts[0]},
// by file
{Query: Account{File: accounts[0].File}, WantResult: accounts[0]},
// by basename
{Query: Account{File: filepath.Base(accounts[0].File)}, WantResult: accounts[0]},
// by file and address
{Query: accounts[0], WantResult: accounts[0]},
// ambiguous address, tie resolved by file
{Query: accounts[2], WantResult: accounts[2]},
// ambiguous address error
{
Query: Account{Address: accounts[2].Address},
WantError: &AmbiguousAddrError{
Addr: accounts[2].Address,
Matches: []Account{accounts[2], accounts[3]},
},
},
// no match error
{Query: nomatchAccount, WantError: ErrNoMatch},
{Query: Account{File: nomatchAccount.File}, WantError: ErrNoMatch},
{Query: Account{File: filepath.Base(nomatchAccount.File)}, WantError: ErrNoMatch},
{Query: Account{Address: nomatchAccount.Address}, WantError: ErrNoMatch},
}
for i, test := range tests {
a, err := cache.find(test.Query)
if !reflect.DeepEqual(err, test.WantError) {
t.Errorf("test %d: error mismatch for query %v\ngot %q\nwant %q", i, test.Query, err, test.WantError)
continue
}
if a != test.WantResult {
t.Errorf("test %d: result mismatch for query %v\ngot %v\nwant %v", i, test.Query, a, test.WantResult)
continue
}
}
}


@ -14,17 +14,23 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package crypto
package accounts
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/pborman/uuid"
)
@ -42,6 +48,15 @@ type Key struct {
PrivateKey *ecdsa.PrivateKey
}
type keyStore interface {
// Loads and decrypts the key from disk.
GetKey(addr common.Address, filename string, auth string) (*Key, error)
// Writes and encrypts the key.
StoreKey(filename string, k *Key, auth string) error
// Joins filename with the key directory unless it is already absolute.
JoinPath(filename string) string
}
type plainKeyJSON struct {
Address string `json:"address"`
PrivateKey string `json:"privatekey"`
@ -87,7 +102,7 @@ type scryptParamsJSON struct {
func (k *Key) MarshalJSON() (j []byte, err error) {
jStruct := plainKeyJSON{
hex.EncodeToString(k.Address[:]),
hex.EncodeToString(FromECDSA(k.PrivateKey)),
hex.EncodeToString(crypto.FromECDSA(k.PrivateKey)),
k.Id.String(),
version,
}
@ -116,39 +131,24 @@ func (k *Key) UnmarshalJSON(j []byte) (err error) {
}
k.Address = common.BytesToAddress(addr)
k.PrivateKey = ToECDSA(privkey)
k.PrivateKey = crypto.ToECDSA(privkey)
return nil
}
func NewKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key {
func newKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key {
id := uuid.NewRandom()
key := &Key{
Id: id,
Address: PubkeyToAddress(privateKeyECDSA.PublicKey),
Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey),
PrivateKey: privateKeyECDSA,
}
return key
}
func NewKey(rand io.Reader) *Key {
randBytes := make([]byte, 64)
_, err := rand.Read(randBytes)
if err != nil {
panic("key generation: could not read from random source: " + err.Error())
}
reader := bytes.NewReader(randBytes)
privateKeyECDSA, err := ecdsa.GenerateKey(secp256k1.S256(), reader)
if err != nil {
panic("key generation: ecdsa.GenerateKey failed: " + err.Error())
}
return NewKeyFromECDSA(privateKeyECDSA)
}
// generate key whose address fits into < 155 bits so it can fit into
// the Direct ICAP spec. for simplicity and easier compatibility with
// other libs, we retry until the first byte is 0.
// NewKeyForDirectICAP generates a key whose address fits into < 155 bits so it can fit
// into the Direct ICAP spec. for simplicity and easier compatibility with other libs, we
// retry until the first byte is 0.
func NewKeyForDirectICAP(rand io.Reader) *Key {
randBytes := make([]byte, 64)
_, err := rand.Read(randBytes)
@ -160,9 +160,70 @@ func NewKeyForDirectICAP(rand io.Reader) *Key {
if err != nil {
panic("key generation: ecdsa.GenerateKey failed: " + err.Error())
}
key := NewKeyFromECDSA(privateKeyECDSA)
key := newKeyFromECDSA(privateKeyECDSA)
if !strings.HasPrefix(key.Address.Hex(), "0x00") {
return NewKeyForDirectICAP(rand)
}
return key
}
func newKey(rand io.Reader) (*Key, error) {
privateKeyECDSA, err := ecdsa.GenerateKey(secp256k1.S256(), rand)
if err != nil {
return nil, err
}
return newKeyFromECDSA(privateKeyECDSA), nil
}
func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, Account, error) {
key, err := newKey(rand)
if err != nil {
return nil, Account{}, err
}
a := Account{Address: key.Address, File: ks.JoinPath(keyFileName(key.Address))}
if err := ks.StoreKey(a.File, key, auth); err != nil {
zeroKey(key.PrivateKey)
return nil, a, err
}
return key, a, err
}
func writeKeyFile(file string, content []byte) error {
// Create the keystore directory with appropriate permissions
// in case it is not present yet.
const dirPerm = 0700
if err := os.MkdirAll(filepath.Dir(file), dirPerm); err != nil {
return err
}
// Atomic write: create a temporary hidden file first
// then move it into place. TempFile assigns mode 0600.
f, err := ioutil.TempFile(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
if err != nil {
return err
}
if _, err := f.Write(content); err != nil {
f.Close()
os.Remove(f.Name())
return err
}
f.Close()
return os.Rename(f.Name(), file)
}
// keyFileName implements the naming convention for keyfiles:
// UTC--<created_at UTC ISO8601>-<address hex>
func keyFileName(keyAddr common.Address) string {
ts := time.Now().UTC()
return fmt.Sprintf("UTC--%s--%s", toISO8601(ts), hex.EncodeToString(keyAddr[:]))
}
func toISO8601(t time.Time) string {
var tz string
name, offset := t.Zone()
if name == "UTC" {
tz = "Z"
} else {
tz = fmt.Sprintf("%03d00", offset/3600)
}
return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
}
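The naming convention above is easiest to see on a concrete value. A standalone sketch (standard library only, UTC case only, i.e. tz == "Z") that mirrors keyFileName/toISO8601 and prints the same form as one of the test fixtures added further down:

package main

import (
	"fmt"
	"time"
)

// keyFileName mirrors the function above for the UTC case.
func keyFileName(addrHex string, ts time.Time) string {
	iso := fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09dZ",
		ts.Year(), ts.Month(), ts.Day(), ts.Hour(), ts.Minute(), ts.Second(), ts.Nanosecond())
	return fmt.Sprintf("UTC--%s--%s", iso, addrHex)
}

func main() {
	ts := time.Date(2016, 3, 22, 12, 57, 55, 920751759, time.UTC)
	fmt.Println(keyFileName("7ef5a6135f1fd6a02593eedc869c6d41d934aef8", ts))
	// UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
}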

View File

@ -23,7 +23,7 @@ The crypto is documented at https://github.com/ethereum/wiki/wiki/Web3-Secret-St
*/
package crypto
package accounts
import (
"bytes"
@ -31,11 +31,12 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/randentropy"
"github.com/pborman/uuid"
"golang.org/x/crypto/pbkdf2"
@ -63,32 +64,37 @@ type keyStorePassphrase struct {
scryptP int
}
func NewKeyStorePassphrase(path string, scryptN int, scryptP int) KeyStore {
return &keyStorePassphrase{path, scryptN, scryptP}
func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) {
// Load the key from the keystore and decrypt its contents
keyjson, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
key, err := DecryptKey(keyjson, auth)
if err != nil {
return nil, err
}
// Make sure we're really operating on the requested key (no swap attacks)
if key.Address != addr {
return nil, fmt.Errorf("key content mismatch: have account %x, want %x", key.Address, addr)
}
return key, nil
}
func (ks keyStorePassphrase) GenerateNewKey(rand io.Reader, auth string) (key *Key, err error) {
return GenerateNewKeyDefault(ks, rand, auth)
}
func (ks keyStorePassphrase) GetKey(keyAddr common.Address, auth string) (key *Key, err error) {
return decryptKeyFromFile(ks.keysDirPath, keyAddr, auth)
}
func (ks keyStorePassphrase) Cleanup(keyAddr common.Address) (err error) {
return cleanup(ks.keysDirPath, keyAddr)
}
func (ks keyStorePassphrase) GetKeyAddresses() (addresses []common.Address, err error) {
return getKeyAddresses(ks.keysDirPath)
}
func (ks keyStorePassphrase) StoreKey(key *Key, auth string) error {
func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) error {
keyjson, err := EncryptKey(key, auth, ks.scryptN, ks.scryptP)
if err != nil {
return err
}
return writeKeyFile(key.Address, ks.keysDirPath, keyjson)
return writeKeyFile(filename, keyjson)
}
func (ks keyStorePassphrase) JoinPath(filename string) string {
if filepath.IsAbs(filename) {
return filename
} else {
return filepath.Join(ks.keysDirPath, filename)
}
}
// EncryptKey encrypts a key using the specified scrypt parameters into a json
@ -101,14 +107,14 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
return nil, err
}
encryptKey := derivedKey[:16]
keyBytes := FromECDSA(key.PrivateKey)
keyBytes := crypto.FromECDSA(key.PrivateKey)
iv := randentropy.GetEntropyCSPRNG(aes.BlockSize) // 16
cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
if err != nil {
return nil, err
}
mac := Keccak256(derivedKey[16:32], cipherText)
mac := crypto.Keccak256(derivedKey[16:32], cipherText)
scryptParamsJSON := make(map[string]interface{}, 5)
scryptParamsJSON["n"] = scryptN
@ -138,14 +144,6 @@ func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
return json.Marshal(encryptedKeyJSONV3)
}
func (ks keyStorePassphrase) DeleteKey(keyAddr common.Address, auth string) error {
// only delete if correct passphrase is given
if _, err := decryptKeyFromFile(ks.keysDirPath, keyAddr, auth); err != nil {
return err
}
return deleteKey(ks.keysDirPath, keyAddr)
}
// DecryptKey decrypts a key from a json blob, returning the private key itself.
func DecryptKey(keyjson []byte, auth string) (*Key, error) {
// Parse the json into a simple map to fetch the key version
@ -175,31 +173,14 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
if err != nil {
return nil, err
}
key := ToECDSA(keyBytes)
key := crypto.ToECDSA(keyBytes)
return &Key{
Id: uuid.UUID(keyId),
Address: PubkeyToAddress(key.PublicKey),
Address: crypto.PubkeyToAddress(key.PublicKey),
PrivateKey: key,
}, nil
}
func decryptKeyFromFile(keysDirPath string, keyAddr common.Address, auth string) (*Key, error) {
// Load the key from the keystore and decrypt its contents
keyjson, err := getKeyFile(keysDirPath, keyAddr)
if err != nil {
return nil, err
}
key, err := DecryptKey(keyjson, auth)
if err != nil {
return nil, err
}
// Make sure we're really operating on the requested key (no swap attacks)
if keyAddr != key.Address {
return nil, fmt.Errorf("key content mismatch: have account %x, want %x", key.Address, keyAddr)
}
return key, nil
}
func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {
if keyProtected.Version != version {
return nil, nil, fmt.Errorf("Version not supported: %v", keyProtected.Version)
@ -230,9 +211,9 @@ func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byt
return nil, nil, err
}
calculatedMAC := Keccak256(derivedKey[16:32], cipherText)
calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
if !bytes.Equal(calculatedMAC, mac) {
return nil, nil, errors.New("Decryption failed: MAC mismatch")
return nil, nil, ErrDecrypt
}
plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv)
@ -264,12 +245,12 @@ func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byt
return nil, nil, err
}
calculatedMAC := Keccak256(derivedKey[16:32], cipherText)
calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
if !bytes.Equal(calculatedMAC, mac) {
return nil, nil, errors.New("Decryption failed: MAC mismatch")
return nil, nil, ErrDecrypt
}
plainText, err := aesCBCDecrypt(Keccak256(derivedKey[:16])[:16], cipherText, iv)
plainText, err := aesCBCDecrypt(crypto.Keccak256(derivedKey[:16])[:16], cipherText, iv)
if err != nil {
return nil, nil, err
}
@ -294,13 +275,13 @@ func getKDFKey(cryptoJSON cryptoJSON, auth string) ([]byte, error) {
c := ensureInt(cryptoJSON.KDFParams["c"])
prf := cryptoJSON.KDFParams["prf"].(string)
if prf != "hmac-sha256" {
return nil, fmt.Errorf("Unsupported PBKDF2 PRF: ", prf)
return nil, fmt.Errorf("Unsupported PBKDF2 PRF: %s", prf)
}
key := pbkdf2.Key(authArray, salt, c, dkLen, sha256.New)
return key, nil
}
return nil, fmt.Errorf("Unsupported KDF: ", cryptoJSON.KDF)
return nil, fmt.Errorf("Unsupported KDF: %s", cryptoJSON.KDF)
}
// TODO: can we do without this when unmarshalling dynamic JSON?
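The MAC check in decryptKeyV3/decryptKeyV1 is what turns a wrong passphrase into ErrDecrypt: keccak256(derivedKey[16:32] || ciphertext) must match the stored mac before any decryption is attempted. A self-contained sketch of just that check, using golang.org/x/crypto/sha3's NewLegacyKeccak256 as a stand-in for go-ethereum's crypto.Keccak256 (the error value and sample inputs are illustrative):

package main

import (
	"bytes"
	"errors"
	"fmt"

	"golang.org/x/crypto/sha3"
)

var errDecrypt = errors.New("could not decrypt key with given passphrase")

// checkMAC reproduces the keystore MAC verification shown above.
func checkMAC(derivedKey, cipherText, mac []byte) error {
	h := sha3.NewLegacyKeccak256()
	h.Write(derivedKey[16:32])
	h.Write(cipherText)
	if !bytes.Equal(h.Sum(nil), mac) {
		return errDecrypt
	}
	return nil
}

func main() {
	derivedKey := make([]byte, 32)        // would come from scrypt or PBKDF2
	cipherText := []byte("dummy payload") // would come from the key file
	h := sha3.NewLegacyKeccak256()
	h.Write(derivedKey[16:32])
	h.Write(cipherText)
	fmt.Println(checkMAC(derivedKey, cipherText, h.Sum(nil))) // <nil>
}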

View File

@ -14,25 +14,34 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package crypto
package accounts
import (
"io/ioutil"
"testing"
"github.com/ethereum/go-ethereum/common"
)
const (
veryLightScryptN = 2
veryLightScryptP = 1
)
// Tests that a json key file can be decrypted and encrypted in multiple rounds.
func TestKeyEncryptDecrypt(t *testing.T) {
address := common.HexToAddress("f626acac23772cbe04dd578bee681b06bdefb9fa")
keyjson := []byte("{\"address\":\"f626acac23772cbe04dd578bee681b06bdefb9fa\",\"crypto\":{\"cipher\":\"aes-128-ctr\",\"ciphertext\":\"1bcf0ab9b14459795ce59f63e63255ffd84dc38d31614a5a78e37144d7e4a17f\",\"cipherparams\":{\"iv\":\"df4c7e225ee2d81adef522013e3fbe24\"},\"kdf\":\"scrypt\",\"kdfparams\":{\"dklen\":32,\"n\":262144,\"p\":1,\"r\":8,\"salt\":\"2909a99dd2bfa7079a4b40991773b1083f8512c0c55b9b63402ab0e3dc8db8b3\"},\"mac\":\"4ecf6a4ad92ae2c016cb7c44abade74799480c3303eb024661270dfefdbc7510\"},\"id\":\"b4718210-9a30-4883-b8a6-dbdd08bd0ceb\",\"version\":3}")
keyjson, err := ioutil.ReadFile("testdata/very-light-scrypt.json")
if err != nil {
t.Fatal(err)
}
password := ""
address := common.HexToAddress("45dea0fb0bba44f4fcf290bba71fd57d7117cbb8")
// Do a few rounds of decryption and encryption
for i := 0; i < 3; i++ {
// Try a bad password first
if _, err := DecryptKey(keyjson, password+"bad"); err == nil {
t.Error("test %d: json key decrypted with bad password", i)
t.Errorf("test %d: json key decrypted with bad password", i)
}
// Decrypt with the correct password
key, err := DecryptKey(keyjson, password)
@ -44,8 +53,8 @@ func TestKeyEncryptDecrypt(t *testing.T) {
}
// Recrypt with a new password and start over
password += "new data appended"
if keyjson, err = EncryptKey(key, password, LightScryptN, LightScryptP); err != nil {
t.Errorf("test %d: failed to recrypt key %v", err)
if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil {
t.Errorf("test %d: failed to recrypt key %v", i, err)
}
}
}

View File

@ -0,0 +1,62 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/ethereum/go-ethereum/common"
)
type keyStorePlain struct {
keysDirPath string
}
func (ks keyStorePlain) GetKey(addr common.Address, filename, auth string) (*Key, error) {
fd, err := os.Open(filename)
if err != nil {
return nil, err
}
defer fd.Close()
key := new(Key)
if err := json.NewDecoder(fd).Decode(key); err != nil {
return nil, err
}
if key.Address != addr {
return nil, fmt.Errorf("key content mismatch: have address %x, want %x", key.Address, addr)
}
return key, nil
}
func (ks keyStorePlain) StoreKey(filename string, key *Key, auth string) error {
content, err := json.Marshal(key)
if err != nil {
return err
}
return writeKeyFile(filename, content)
}
func (ks keyStorePlain) JoinPath(filename string) string {
if filepath.IsAbs(filename) {
return filename
} else {
return filepath.Join(ks.keysDirPath, filename)
}
}
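Both keystore implementations carry the same JoinPath: absolute filenames are used as given, anything else is rooted at the key directory, which is what lets Account.File hold either form. Illustrated with made-up paths:

package main

import (
	"fmt"
	"path/filepath"
)

// joinPath mirrors the JoinPath methods above for an arbitrary key directory.
func joinPath(keydir, filename string) string {
	if filepath.IsAbs(filename) {
		return filename
	}
	return filepath.Join(keydir, filename)
}

func main() {
	fmt.Println(joinPath("/home/alice/.ethereum/keystore", "aaa"))
	// /home/alice/.ethereum/keystore/aaa
	fmt.Println(joinPath("/home/alice/.ethereum/keystore", "/tmp/imported-key"))
	// /tmp/imported-key
}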

View File

@ -14,112 +14,114 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package crypto
package accounts
import (
"crypto/rand"
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"reflect"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto/randentropy"
"github.com/ethereum/go-ethereum/crypto"
)
func tmpKeyStore(t *testing.T, encrypted bool) (dir string, ks keyStore) {
d, err := ioutil.TempDir("", "geth-keystore-test")
if err != nil {
t.Fatal(err)
}
if encrypted {
ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP}
} else {
ks = &keyStorePlain{d}
}
return d, ks
}
func TestKeyStorePlain(t *testing.T) {
ks := NewKeyStorePlain(common.DefaultDataDir())
dir, ks := tmpKeyStore(t, false)
defer os.RemoveAll(dir)
pass := "" // not used but required by API
k1, err := ks.GenerateNewKey(randentropy.Reader, pass)
k1, account, err := storeNewKey(ks, rand.Reader, pass)
if err != nil {
t.Fatal(err)
}
k2 := new(Key)
k2, err = ks.GetKey(k1.Address, pass)
k2, err := ks.GetKey(k1.Address, account.File, pass)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(k1.Address, k2.Address) {
t.Fatal(err)
}
if !reflect.DeepEqual(k1.PrivateKey, k2.PrivateKey) {
t.Fatal(err)
}
err = ks.DeleteKey(k2.Address, pass)
if err != nil {
t.Fatal(err)
}
}
func TestKeyStorePassphrase(t *testing.T) {
ks := NewKeyStorePassphrase(common.DefaultDataDir(), LightScryptN, LightScryptP)
dir, ks := tmpKeyStore(t, true)
defer os.RemoveAll(dir)
pass := "foo"
k1, err := ks.GenerateNewKey(randentropy.Reader, pass)
k1, account, err := storeNewKey(ks, rand.Reader, pass)
if err != nil {
t.Fatal(err)
}
k2 := new(Key)
k2, err = ks.GetKey(k1.Address, pass)
k2, err := ks.GetKey(k1.Address, account.File, pass)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(k1.Address, k2.Address) {
t.Fatal(err)
}
if !reflect.DeepEqual(k1.PrivateKey, k2.PrivateKey) {
t.Fatal(err)
}
err = ks.DeleteKey(k2.Address, pass) // also to clean up created files
if err != nil {
t.Fatal(err)
}
}
func TestKeyStorePassphraseDecryptionFail(t *testing.T) {
ks := NewKeyStorePassphrase(common.DefaultDataDir(), LightScryptN, LightScryptP)
dir, ks := tmpKeyStore(t, true)
defer os.RemoveAll(dir)
pass := "foo"
k1, err := ks.GenerateNewKey(randentropy.Reader, pass)
k1, account, err := storeNewKey(ks, rand.Reader, pass)
if err != nil {
t.Fatal(err)
}
_, err = ks.GetKey(k1.Address, "bar") // wrong passphrase
if err == nil {
t.Fatal(err)
}
err = ks.DeleteKey(k1.Address, "bar") // wrong passphrase
if err == nil {
t.Fatal(err)
}
err = ks.DeleteKey(k1.Address, pass) // to clean up
if err != nil {
t.Fatal(err)
if _, err = ks.GetKey(k1.Address, account.File, "bar"); err != ErrDecrypt {
t.Fatalf("wrong error for invalid passphrase\ngot %q\nwant %q", err, ErrDecrypt)
}
}
func TestImportPreSaleKey(t *testing.T) {
dir, ks := tmpKeyStore(t, true)
defer os.RemoveAll(dir)
// file content of a presale key file generated with:
// python pyethsaletool.py genwallet
// with password "foo"
fileContent := "{\"encseed\": \"26d87f5f2bf9835f9a47eefae571bc09f9107bb13d54ff12a4ec095d01f83897494cf34f7bed2ed34126ecba9db7b62de56c9d7cd136520a0427bfb11b8954ba7ac39b90d4650d3448e31185affcd74226a68f1e94b1108e6e0a4a91cdd83eba\", \"ethaddr\": \"d4584b5f6229b7be90727b0fc8c6b91bb427821f\", \"email\": \"gustav.simonsson@gmail.com\", \"btcaddr\": \"1EVknXyFC68kKNLkh6YnKzW41svSRoaAcx\"}"
ks := NewKeyStorePassphrase(common.DefaultDataDir(), LightScryptN, LightScryptP)
pass := "foo"
_, err := ImportPreSaleKey(ks, []byte(fileContent), pass)
account, _, err := importPreSaleKey(ks, []byte(fileContent), pass)
if err != nil {
t.Fatal(err)
}
if account.Address != common.HexToAddress("d4584b5f6229b7be90727b0fc8c6b91bb427821f") {
t.Errorf("imported account has wrong address %x", account.Address)
}
if !strings.HasPrefix(account.File, dir) {
t.Errorf("imported account file not in keystore directory: %q", account.File)
}
}
// Test and utils for the key store tests in the Ethereum JSON tests;
// tests/KeyStoreTests/basic_tests.json
// testdataKeyStoreTests/basic_tests.json
type KeyStoreTestV3 struct {
Json encryptedKeyJSONV3
Password string
@ -133,52 +135,57 @@ type KeyStoreTestV1 struct {
}
func TestV3_PBKDF2_1(t *testing.T) {
tests := loadKeyStoreTestV3("tests/v3_test_vector.json", t)
t.Parallel()
tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t)
testDecryptV3(tests["wikipage_test_vector_pbkdf2"], t)
}
func TestV3_PBKDF2_2(t *testing.T) {
t.Parallel()
tests := loadKeyStoreTestV3("../tests/files/KeyStoreTests/basic_tests.json", t)
testDecryptV3(tests["test1"], t)
}
func TestV3_PBKDF2_3(t *testing.T) {
t.Parallel()
tests := loadKeyStoreTestV3("../tests/files/KeyStoreTests/basic_tests.json", t)
testDecryptV3(tests["python_generated_test_with_odd_iv"], t)
}
func TestV3_PBKDF2_4(t *testing.T) {
t.Parallel()
tests := loadKeyStoreTestV3("../tests/files/KeyStoreTests/basic_tests.json", t)
testDecryptV3(tests["evilnonce"], t)
}
func TestV3_Scrypt_1(t *testing.T) {
tests := loadKeyStoreTestV3("tests/v3_test_vector.json", t)
t.Parallel()
tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t)
testDecryptV3(tests["wikipage_test_vector_scrypt"], t)
}
func TestV3_Scrypt_2(t *testing.T) {
t.Parallel()
tests := loadKeyStoreTestV3("../tests/files/KeyStoreTests/basic_tests.json", t)
testDecryptV3(tests["test2"], t)
}
func TestV1_1(t *testing.T) {
tests := loadKeyStoreTestV1("tests/v1_test_vector.json", t)
t.Parallel()
tests := loadKeyStoreTestV1("testdata/v1_test_vector.json", t)
testDecryptV1(tests["test1"], t)
}
func TestV1_2(t *testing.T) {
ks := NewKeyStorePassphrase("tests/v1", LightScryptN, LightScryptP)
t.Parallel()
ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP}
addr := common.HexToAddress("cb61d5a9c4896fb9658090b597ef0e7be6f7b67e")
k, err := ks.GetKey(addr, "g")
file := "testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e"
k, err := ks.GetKey(addr, file, "g")
if err != nil {
t.Fatal(err)
}
if k.Address != addr {
t.Fatal(fmt.Errorf("Unexpected address: %v, expected %v", k.Address, addr))
}
privHex := hex.EncodeToString(FromECDSA(k.PrivateKey))
privHex := hex.EncodeToString(crypto.FromECDSA(k.PrivateKey))
expectedHex := "d1b1178d3529626a1a93e073f65028370d14c7eb0936eb42abef05db6f37ad7d"
if privHex != expectedHex {
t.Fatal(fmt.Errorf("Unexpected privkey: %v, expected %v", privHex, expectedHex))
@ -226,7 +233,8 @@ func loadKeyStoreTestV1(file string, t *testing.T) map[string]KeyStoreTestV1 {
}
func TestKeyForDirectICAP(t *testing.T) {
key := NewKeyForDirectICAP(randentropy.Reader)
t.Parallel()
key := NewKeyForDirectICAP(rand.Reader)
if !strings.HasPrefix(key.Address.Hex(), "0x00") {
t.Errorf("Expected first address byte to be zero, have: %s", key.Address.Hex())
}
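TestKeyStorePlain and TestKeyStorePassphrase above share one shape: generate a key, store it, read it back, compare. As a sketch, the shared part could be factored into a helper living inside package accounts (assumes the usual crypto/rand, reflect and testing imports; the helper is illustrative, not part of this change):

// testRoundTrip stores a fresh key through ks and checks it reloads intact.
func testRoundTrip(t *testing.T, ks keyStore, pass string) {
	k1, account, err := storeNewKey(ks, rand.Reader, pass)
	if err != nil {
		t.Fatal(err)
	}
	k2, err := ks.GetKey(k1.Address, account.File, pass)
	if err != nil {
		t.Fatal(err)
	}
	if k1.Address != k2.Address || !reflect.DeepEqual(k1.PrivateKey, k2.PrivateKey) {
		t.Fatal("reloaded key does not match the stored key")
	}
}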

132
accounts/presale.go Normal file
View File

@ -0,0 +1,132 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"crypto/aes"
"crypto/cipher"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/crypto"
"github.com/pborman/uuid"
"golang.org/x/crypto/pbkdf2"
)
// creates a Key and stores that in the given KeyStore by decrypting a presale key JSON
func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (Account, *Key, error) {
key, err := decryptPreSaleKey(keyJSON, password)
if err != nil {
return Account{}, nil, err
}
key.Id = uuid.NewRandom()
a := Account{Address: key.Address, File: keyStore.JoinPath(keyFileName(key.Address))}
err = keyStore.StoreKey(a.File, key, password)
return a, key, err
}
func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error) {
preSaleKeyStruct := struct {
EncSeed string
EthAddr string
Email string
BtcAddr string
}{}
err = json.Unmarshal(fileContent, &preSaleKeyStruct)
if err != nil {
return nil, err
}
encSeedBytes, err := hex.DecodeString(preSaleKeyStruct.EncSeed)
iv := encSeedBytes[:16]
cipherText := encSeedBytes[16:]
/*
See https://github.com/ethereum/pyethsaletool
pyethsaletool generates the encryption key from password by
2000 rounds of PBKDF2 with HMAC-SHA-256 using password as salt (:().
16 byte key length within PBKDF2 and resulting key is used as AES key
*/
passBytes := []byte(password)
derivedKey := pbkdf2.Key(passBytes, passBytes, 2000, 16, sha256.New)
plainText, err := aesCBCDecrypt(derivedKey, cipherText, iv)
if err != nil {
return nil, err
}
ethPriv := crypto.Keccak256(plainText)
ecKey := crypto.ToECDSA(ethPriv)
key = &Key{
Id: nil,
Address: crypto.PubkeyToAddress(ecKey.PublicKey),
PrivateKey: ecKey,
}
derivedAddr := hex.EncodeToString(key.Address.Bytes()) // needed because .Hex() gives leading "0x"
expectedAddr := preSaleKeyStruct.EthAddr
if derivedAddr != expectedAddr {
err = fmt.Errorf("decrypted addr '%s' not equal to expected addr '%s'", derivedAddr, expectedAddr)
}
return key, err
}
func aesCTRXOR(key, inText, iv []byte) ([]byte, error) {
// AES-128 is selected due to size of encryptKey.
aesBlock, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
stream := cipher.NewCTR(aesBlock, iv)
outText := make([]byte, len(inText))
stream.XORKeyStream(outText, inText)
return outText, err
}
func aesCBCDecrypt(key, cipherText, iv []byte) ([]byte, error) {
aesBlock, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
decrypter := cipher.NewCBCDecrypter(aesBlock, iv)
paddedPlaintext := make([]byte, len(cipherText))
decrypter.CryptBlocks(paddedPlaintext, cipherText)
plaintext := pkcs7Unpad(paddedPlaintext)
if plaintext == nil {
return nil, ErrDecrypt
}
return plaintext, err
}
// From https://leanpub.com/gocrypto/read#leanpub-auto-block-cipher-modes
func pkcs7Unpad(in []byte) []byte {
if len(in) == 0 {
return nil
}
padding := in[len(in)-1]
if int(padding) > len(in) || padding > aes.BlockSize {
return nil
} else if padding == 0 {
return nil
}
for i := len(in) - 1; i > len(in)-int(padding)-1; i-- {
if in[i] != padding {
return nil
}
}
return in[:len(in)-int(padding)]
}
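The only secret protecting a presale wallet is the password: decryptPreSaleKey derives the AES-128 key with PBKDF2 using the password itself as the salt, 2000 iterations, HMAC-SHA-256, 16-byte output. The derivation in isolation (runnable, using the throw-away password "foo" from the presale test fixture in this change):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	password := []byte("foo")
	// Same parameters as decryptPreSaleKey above; the password doubles as the salt.
	aesKey := pbkdf2.Key(password, password, 2000, 16, sha256.New)
	fmt.Println(hex.EncodeToString(aesKey)) // the 16-byte AES-128 key
}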

1
accounts/testdata/dupes/1 vendored Normal file
View File

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

1
accounts/testdata/dupes/2 vendored Normal file
View File

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

1
accounts/testdata/dupes/foo vendored Normal file
View File

@ -0,0 +1 @@
{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}

View File

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

21
accounts/testdata/keystore/README vendored Normal file
View File

@ -0,0 +1,21 @@
This directory contains accounts for testing.
The passphrase that unlocks them is "foobar".
The "good" key files which are supposed to be loadable are:
- File: UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
Address: 0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8
- File: aaa
Address: 0xf466859ead1932d743d622cb74fc058882e8648a
- File: zzz
Address: 0x289d485d9771714cce91d3393d764e1311907acc
The other files (including this README) are broken in various ways
and should not be picked up by package accounts:
- File: no-address (missing address field, otherwise same as "aaa")
- File: garbage (file with random data)
- File: empty (file with no content)
- File: swapfile~ (should be skipped)
- File: .hiddenfile (should be skipped)
- File: foo/... (should be skipped because it is a directory)

View File

@ -0,0 +1 @@
{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}

1
accounts/testdata/keystore/aaa vendored Normal file
View File

@ -0,0 +1 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

0
accounts/testdata/keystore/empty vendored Normal file
View File

View File

@ -0,0 +1 @@
{"address":"fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e","crypto":{"cipher":"aes-128-ctr","ciphertext":"8124d5134aa4a927c79fd852989e4b5419397566f04b0936a1eb1d168c7c68a5","cipherparams":{"iv":"e2febe17176414dd2cda28287947eb2f"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":4096,"p":6,"r":8,"salt":"44b415ede89f3bdd6830390a21b78965f571b347a589d1d943029f016c5e8bd5"},"mac":"5e149ff25bfd9dd45746a84bb2bcd2f015f2cbca2b6d25c5de8c29617f71fe5b"},"id":"d6ac5452-2b2c-4d3c-ad80-4bf0327d971c","version":3}

BIN
accounts/testdata/keystore/garbage vendored Normal file

Binary file not shown.

1
accounts/testdata/keystore/no-address vendored Normal file
View File

@ -0,0 +1 @@
{"crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

1
accounts/testdata/keystore/zero vendored Normal file
View File

@ -0,0 +1 @@
{"address":"0000000000000000000000000000000000000000","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}

1
accounts/testdata/keystore/zzz vendored Normal file
View File

@ -0,0 +1 @@
{"address":"289d485d9771714cce91d3393d764e1311907acc","crypto":{"cipher":"aes-128-ctr","ciphertext":"faf32ca89d286b107f5e6d842802e05263c49b78d46eac74e6109e9a963378ab","cipherparams":{"iv":"558833eec4a665a8c55608d7d503407d"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"d571fff447ffb24314f9513f5160246f09997b857ac71348b73e785aab40dc04"},"mac":"21edb85ff7d0dab1767b9bf498f2c3cb7be7609490756bd32300bb213b59effe"},"id":"3279afcf-55ba-43ff-8997-02dcc46a6525","version":3}

View File

@ -0,0 +1 @@
{"address":"45dea0fb0bba44f4fcf290bba71fd57d7117cbb8","crypto":{"cipher":"aes-128-ctr","ciphertext":"b87781948a1befd247bff51ef4063f716cf6c2d3481163e9a8f42e1f9bb74145","cipherparams":{"iv":"dc4926b48a105133d2f16b96833abf1e"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":2,"p":1,"r":8,"salt":"004244bbdc51cadda545b1cfa43cff9ed2ae88e08c61f1479dbb45410722f8f0"},"mac":"39990c1684557447940d4c69e06b1b82b2aceacb43f284df65c956daf3046b85"},"id":"ce541d8d-c79b-40f8-9f8c-20f59616faba","version":3}

113
accounts/watch.go Normal file
View File

@ -0,0 +1,113 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build darwin freebsd linux netbsd solaris windows
package accounts
import (
"time"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/rjeczalik/notify"
)
type watcher struct {
ac *addrCache
starting bool
running bool
ev chan notify.EventInfo
quit chan struct{}
}
func newWatcher(ac *addrCache) *watcher {
return &watcher{
ac: ac,
ev: make(chan notify.EventInfo, 10),
quit: make(chan struct{}),
}
}
// starts the watcher loop in the background.
// Start a watcher in the background if that's not already in progress.
// The caller must hold w.ac.mu.
func (w *watcher) start() {
if w.starting || w.running {
return
}
w.starting = true
go w.loop()
}
func (w *watcher) close() {
close(w.quit)
}
func (w *watcher) loop() {
defer func() {
w.ac.mu.Lock()
w.running = false
w.starting = false
w.ac.mu.Unlock()
}()
err := notify.Watch(w.ac.keydir, w.ev, notify.All)
if err != nil {
glog.V(logger.Detail).Infof("can't watch %s: %v", w.ac.keydir, err)
return
}
defer notify.Stop(w.ev)
glog.V(logger.Detail).Infof("now watching %s", w.ac.keydir)
defer glog.V(logger.Detail).Infof("no longer watching %s", w.ac.keydir)
w.ac.mu.Lock()
w.running = true
w.ac.mu.Unlock()
// Wait for file system events and reload.
// When an event occurs, the reload call is delayed a bit so that
// multiple events arriving quickly only cause a single reload.
var (
debounce = time.NewTimer(0)
debounceDuration = 500 * time.Millisecond
inCycle, hadEvent bool
)
defer debounce.Stop()
for {
select {
case <-w.quit:
return
case <-w.ev:
if !inCycle {
debounce.Reset(debounceDuration)
inCycle = true
} else {
hadEvent = true
}
case <-debounce.C:
w.ac.mu.Lock()
w.ac.reload()
w.ac.mu.Unlock()
if hadEvent {
debounce.Reset(debounceDuration)
inCycle, hadEvent = true, false
} else {
inCycle, hadEvent = false, false
}
}
}
}
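The select loop above is a debounce: a burst of filesystem events arms the timer once, the reload runs when the timer fires, and one extra cycle covers events that arrived mid-reload. The same pattern in isolation, with a plain channel standing in for the notify events (helper name and timings are illustrative):

package main

import (
	"fmt"
	"time"
)

// debounce runs fn once per quiet period instead of once per event,
// mirroring the inCycle/hadEvent bookkeeping in watcher.loop above.
func debounce(events <-chan struct{}, quiet time.Duration, fn func()) {
	timer := time.NewTimer(0)
	<-timer.C // drain the initial tick
	var pending, rearm bool
	for {
		select {
		case <-events:
			if !pending {
				timer.Reset(quiet)
				pending = true
			} else {
				rearm = true
			}
		case <-timer.C:
			fn()
			if rearm {
				timer.Reset(quiet)
				pending, rearm = true, false
			} else {
				pending = false
			}
		}
	}
}

func main() {
	ev := make(chan struct{})
	go debounce(ev, 100*time.Millisecond, func() { fmt.Println("reload") })
	for i := 0; i < 5; i++ {
		ev <- struct{}{} // a burst of five events...
	}
	time.Sleep(300 * time.Millisecond) // ...is coalesced into two reloads, not five
}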

View File

@ -0,0 +1,28 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build !darwin,!freebsd,!linux,!netbsd,!solaris,!windows
// This is the fallback implementation of directory watching.
// It is used on unsupported platforms.
package accounts
type watcher struct{ running bool }
func newWatcher(*addrCache) *watcher { return new(watcher) }
func (*watcher) start() {}
func (*watcher) close() {}

323
cmd/geth/accountcmd.go Normal file
View File

@ -0,0 +1,323 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"io/ioutil"
"github.com/codegangsta/cli"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
)
var (
walletCommand = cli.Command{
Name: "wallet",
Usage: "ethereum presale wallet",
Subcommands: []cli.Command{
{
Action: importWallet,
Name: "import",
Usage: "import ethereum presale wallet",
},
},
Description: `
geth wallet import /path/to/my/presale.wallet
will prompt for your password and imports your ether presale account.
It can be used non-interactively with the --password option taking a
passwordfile as argument containing the wallet password in plaintext.
`}
accountCommand = cli.Command{
Action: accountList,
Name: "account",
Usage: "manage accounts",
Description: `
Managing accounts lets you create new accounts, list all existing accounts,
and import a private key into a new account.
' help' shows a list of subcommands or help for one subcommand.
It supports interactive mode, when you are prompted for password as well as
non-interactive mode where passwords are supplied via a given password file.
Non-interactive mode is only meant for scripted use on test networks or known
safe environments.
Make sure you remember the password you gave when creating a new account (with
either new or import). Without it you are not able to unlock your account.
Note that exporting your key in unencrypted format is NOT supported.
Keys are stored under <DATADIR>/keys.
It is safe to transfer the entire directory or the individual keys therein
between ethereum nodes by simply copying.
Make sure you backup your keys regularly.
In order to use your accounts to send transactions, you need to unlock them using
the '--unlock' option. The argument is a space-separated list of addresses or
indexes. If used non-interactively with a passwordfile, the file should contain
the respective passwords one per line. If you unlock n accounts and the password
file contains less than n entries, then the last password is meant to apply to
all remaining accounts.
And finally. DO NOT FORGET YOUR PASSWORD.
`,
Subcommands: []cli.Command{
{
Action: accountList,
Name: "list",
Usage: "print account addresses",
},
{
Action: accountCreate,
Name: "new",
Usage: "create a new account",
Description: `
ethereum account new
Creates a new account. Prints the address.
The account is saved in encrypted format, you are prompted for a passphrase.
You must remember this passphrase to unlock your account in the future.
For non-interactive use the passphrase can be specified with the --password flag:
ethereum --password <passwordfile> account new
Note, this is meant to be used for testing only; it is a bad idea to save your
password to a file or expose it in any other way.
`,
},
{
Action: accountUpdate,
Name: "update",
Usage: "update an existing account",
Description: `
ethereum account update <address>
Update an existing account.
The account is saved in the newest version in encrypted format, you are prompted
for a passphrase to unlock the account and another to save the updated file.
This same command can therefore be used to migrate an account of a deprecated
format to the newest format or change the password for an account.
For non-interactive use the passphrase can be specified with the --password flag:
ethereum --password <passwordfile> account update <address>
Since only one password can be given, only a format update can be performed;
changing your password is only possible interactively.
`,
},
{
Action: accountImport,
Name: "import",
Usage: "import a private key into a new account",
Description: `
ethereum account import <keyfile>
Imports an unencrypted private key from <keyfile> and creates a new account.
Prints the address.
The keyfile is assumed to contain an unencrypted private key in hexadecimal format.
The account is saved in encrypted format, you are prompted for a passphrase.
You must remember this passphrase to unlock your account in the future.
For non-interactive use the passphrase can be specified with the -password flag:
ethereum --password <passwordfile> account import <keyfile>
Note:
As you can directly copy your encrypted accounts to another ethereum instance,
this import mechanism is not needed when you transfer an account between
nodes.
`,
},
},
}
)
func accountList(ctx *cli.Context) {
accman := utils.MakeAccountManager(ctx)
for i, acct := range accman.Accounts() {
fmt.Printf("Account #%d: {%x} %s\n", i, acct.Address, acct.File)
}
}
// tries unlocking the specified account a few times.
func unlockAccount(ctx *cli.Context, accman *accounts.Manager, address string, i int, passwords []string) (accounts.Account, string) {
account, err := utils.MakeAddress(accman, address)
if err != nil {
utils.Fatalf("Could not list accounts: %v", err)
}
for trials := 0; trials < 3; trials++ {
prompt := fmt.Sprintf("Unlocking account %s | Attempt %d/%d", address, trials+1, 3)
password := getPassPhrase(prompt, false, i, passwords)
err = accman.Unlock(account, password)
if err == nil {
glog.V(logger.Info).Infof("Unlocked account %x", account.Address)
return account, password
}
if err, ok := err.(*accounts.AmbiguousAddrError); ok {
glog.V(logger.Info).Infof("Unlocked account %x", account.Address)
return ambiguousAddrRecovery(accman, err, password), password
}
if err != accounts.ErrDecrypt {
// No need to prompt again if the error is not decryption-related.
break
}
}
// All trials expended to unlock account, bail out
utils.Fatalf("Failed to unlock account %s (%v)", address, err)
return accounts.Account{}, ""
}
// getPassPhrase retrieves the password associated with an account, either fetched
// from a list of preloaded passphrases, or requested interactively from the user.
func getPassPhrase(prompt string, confirmation bool, i int, passwords []string) string {
// If a list of passwords was supplied, retrieve from them
if len(passwords) > 0 {
if i < len(passwords) {
return passwords[i]
}
return passwords[len(passwords)-1]
}
// Otherwise prompt the user for the password
if prompt != "" {
fmt.Println(prompt)
}
password, err := utils.Stdin.PasswordPrompt("Passphrase: ")
if err != nil {
utils.Fatalf("Failed to read passphrase: %v", err)
}
if confirmation {
confirm, err := utils.Stdin.PasswordPrompt("Repeat passphrase: ")
if err != nil {
utils.Fatalf("Failed to read passphrase confirmation: %v", err)
}
if password != confirm {
utils.Fatalf("Passphrases do not match")
}
}
return password
}
func ambiguousAddrRecovery(am *accounts.Manager, err *accounts.AmbiguousAddrError, auth string) accounts.Account {
fmt.Printf("Multiple key files exist for address %x:\n", err.Addr)
for _, a := range err.Matches {
fmt.Println(" ", a.File)
}
fmt.Println("Testing your passphrase against all of them...")
var match *accounts.Account
for _, a := range err.Matches {
if err := am.Unlock(a, auth); err == nil {
match = &a
break
}
}
if match == nil {
utils.Fatalf("None of the listed files could be unlocked.")
}
fmt.Printf("Your passphrase unlocked %s\n", match.File)
fmt.Println("In order to avoid this warning, you need to remove the following duplicate key files:")
for _, a := range err.Matches {
if a != *match {
fmt.Println(" ", a.File)
}
}
return *match
}
// accountCreate creates a new account in the keystore defined by the CLI flags.
func accountCreate(ctx *cli.Context) {
accman := utils.MakeAccountManager(ctx)
password := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
account, err := accman.NewAccount(password)
if err != nil {
utils.Fatalf("Failed to create account: %v", err)
}
fmt.Printf("Address: {%x}\n", account.Address)
}
// accountUpdate transitions an account from a previous format to the current
// one, also providing the possibility to change the pass-phrase.
func accountUpdate(ctx *cli.Context) {
if len(ctx.Args()) == 0 {
utils.Fatalf("No accounts specified to update")
}
accman := utils.MakeAccountManager(ctx)
account, oldPassword := unlockAccount(ctx, accman, ctx.Args().First(), 0, nil)
newPassword := getPassPhrase("Please give a new password. Do not forget this password.", true, 0, nil)
if err := accman.Update(account, oldPassword, newPassword); err != nil {
utils.Fatalf("Could not update the account: %v", err)
}
}
func importWallet(ctx *cli.Context) {
keyfile := ctx.Args().First()
if len(keyfile) == 0 {
utils.Fatalf("keyfile must be given as argument")
}
keyJson, err := ioutil.ReadFile(keyfile)
if err != nil {
utils.Fatalf("Could not read wallet file: %v", err)
}
accman := utils.MakeAccountManager(ctx)
passphrase := getPassPhrase("", false, 0, utils.MakePasswordList(ctx))
acct, err := accman.ImportPreSaleKey(keyJson, passphrase)
if err != nil {
utils.Fatalf("%v", err)
}
fmt.Printf("Address: {%x}\n", acct.Address)
}
func accountImport(ctx *cli.Context) {
keyfile := ctx.Args().First()
if len(keyfile) == 0 {
utils.Fatalf("keyfile must be given as argument")
}
key, err := crypto.LoadECDSA(keyfile)
if err != nil {
utils.Fatalf("keyfile must be given as argument")
}
accman := utils.MakeAccountManager(ctx)
passphrase := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
acct, err := accman.ImportECDSA(key, passphrase)
if err != nil {
utils.Fatalf("Could not create the account: %v", err)
}
fmt.Printf("Address: {%x}\n", acct.Address)
}
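One detail of getPassPhrase worth keeping in mind when scripting --unlock with --password: if the password file has fewer lines than there are accounts to unlock, the last line is applied to all remaining accounts, exactly as the help text above says. The selection rule on its own (passwordAt is an illustrative name; getPassPhrase has already checked that the list is non-empty):

package main

import "fmt"

// passwordAt mirrors the indexing in getPassPhrase above: entry i if it
// exists, otherwise the final entry covers all remaining accounts.
func passwordAt(passwords []string, i int) string {
	if i < len(passwords) {
		return passwords[i]
	}
	return passwords[len(passwords)-1]
}

func main() {
	pw := []string{"first", "second"}
	for i := 0; i < 4; i++ {
		fmt.Println(i, passwordAt(pw, i)) // 0 first, 1 second, 2 second, 3 second
	}
}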

292
cmd/geth/accountcmd_test.go Normal file
View File

@ -0,0 +1,292 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"io/ioutil"
"path/filepath"
"runtime"
"strings"
"testing"
"github.com/cespare/cp"
)
// These tests are 'smoke tests' for the account related
// subcommands and flags.
//
// For most tests, the test files from package accounts
// are copied into a temporary keystore directory.
func tmpDatadirWithKeystore(t *testing.T) string {
datadir := tmpdir(t)
keystore := filepath.Join(datadir, "keystore")
source := filepath.Join("..", "..", "accounts", "testdata", "keystore")
if err := cp.CopyAll(keystore, source); err != nil {
t.Fatal(err)
}
return datadir
}
func TestAccountListEmpty(t *testing.T) {
geth := runGeth(t, "account")
geth.expectExit()
}
func TestAccountList(t *testing.T) {
datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t, "--datadir", datadir, "account")
defer geth.expectExit()
if runtime.GOOS == "windows" {
geth.expect(`
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} {{.Datadir}}\keystore\UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
Account #1: {f466859ead1932d743d622cb74fc058882e8648a} {{.Datadir}}\keystore\aaa
Account #2: {289d485d9771714cce91d3393d764e1311907acc} {{.Datadir}}\keystore\zzz
`)
} else {
geth.expect(`
Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} {{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
Account #1: {f466859ead1932d743d622cb74fc058882e8648a} {{.Datadir}}/keystore/aaa
Account #2: {289d485d9771714cce91d3393d764e1311907acc} {{.Datadir}}/keystore/zzz
`)
}
}
func TestAccountNew(t *testing.T) {
geth := runGeth(t, "--lightkdf", "account", "new")
defer geth.expectExit()
geth.expect(`
Your new account is locked with a password. Please give a password. Do not forget this password.
!! Unsupported terminal, password will be echoed.
Passphrase: {{.InputLine "foobar"}}
Repeat passphrase: {{.InputLine "foobar"}}
`)
geth.expectRegexp(`Address: \{[0-9a-f]{40}\}\n`)
}
func TestAccountNewBadRepeat(t *testing.T) {
geth := runGeth(t, "--lightkdf", "account", "new")
defer geth.expectExit()
geth.expect(`
Your new account is locked with a password. Please give a password. Do not forget this password.
!! Unsupported terminal, password will be echoed.
Passphrase: {{.InputLine "something"}}
Repeat passphrase: {{.InputLine "something else"}}
Fatal: Passphrases do not match
`)
}
func TestAccountUpdate(t *testing.T) {
datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t,
"--datadir", datadir, "--lightkdf",
"account", "update", "f466859ead1932d743d622cb74fc058882e8648a")
defer geth.expectExit()
geth.expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Passphrase: {{.InputLine "foobar"}}
Please give a new password. Do not forget this password.
Passphrase: {{.InputLine "foobar2"}}
Repeat passphrase: {{.InputLine "foobar2"}}
`)
}
func TestWalletImport(t *testing.T) {
geth := runGeth(t, "--lightkdf", "wallet", "import", "testdata/guswallet.json")
defer geth.expectExit()
geth.expect(`
!! Unsupported terminal, password will be echoed.
Passphrase: {{.InputLine "foo"}}
Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
`)
files, err := ioutil.ReadDir(filepath.Join(geth.Datadir, "keystore"))
if len(files) != 1 {
t.Errorf("expected one key file in keystore directory, found %d files (error: %v)", len(files), err)
}
}
func TestWalletImportBadPassword(t *testing.T) {
geth := runGeth(t, "--lightkdf", "wallet", "import", "testdata/guswallet.json")
defer geth.expectExit()
geth.expect(`
!! Unsupported terminal, password will be echoed.
Passphrase: {{.InputLine "wrong"}}
Fatal: could not decrypt key with given passphrase
`)
}
func TestUnlockFlag(t *testing.T) {
datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t,
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
"js", "testdata/empty.js")
geth.expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Passphrase: {{.InputLine "foobar"}}
`)
geth.expectExit()
wantMessages := []string{
"Unlocked account f466859ead1932d743d622cb74fc058882e8648a",
}
for _, m := range wantMessages {
if strings.Index(geth.stderrText(), m) == -1 {
t.Errorf("stderr text does not contain %q", m)
}
}
}
func TestUnlockFlagWrongPassword(t *testing.T) {
datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t,
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
defer geth.expectExit()
geth.expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Passphrase: {{.InputLine "wrong1"}}
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 2/3
Passphrase: {{.InputLine "wrong2"}}
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 3/3
Passphrase: {{.InputLine "wrong3"}}
Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could not decrypt key with given passphrase)
`)
}
// https://github.com/ethereum/go-ethereum/issues/1785
func TestUnlockFlagMultiIndex(t *testing.T) {
datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t,
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
"--unlock", "0,2",
"js", "testdata/empty.js")
geth.expect(`
Unlocking account 0 | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Passphrase: {{.InputLine "foobar"}}
Unlocking account 2 | Attempt 1/3
Passphrase: {{.InputLine "foobar"}}
`)
geth.expectExit()
wantMessages := []string{
"Unlocked account 7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
"Unlocked account 289d485d9771714cce91d3393d764e1311907acc",
}
for _, m := range wantMessages {
if strings.Index(geth.stderrText(), m) == -1 {
t.Errorf("stderr text does not contain %q", m)
}
}
}
func TestUnlockFlagPasswordFile(t *testing.T) {
datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t,
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
"--password", "testdata/passwords.txt", "--unlock", "0,2",
"js", "testdata/empty.js")
geth.expectExit()
wantMessages := []string{
"Unlocked account 7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
"Unlocked account 289d485d9771714cce91d3393d764e1311907acc",
}
for _, m := range wantMessages {
if strings.Index(geth.stderrText(), m) == -1 {
t.Errorf("stderr text does not contain %q", m)
}
}
}
func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t,
"--datadir", datadir, "--nat", "none", "--nodiscover", "--dev",
"--password", "testdata/wrong-passwords.txt", "--unlock", "0,2")
defer geth.expectExit()
geth.expect(`
Fatal: Failed to unlock account 0 (could not decrypt key with given passphrase)
`)
}
func TestUnlockFlagAmbiguous(t *testing.T) {
store := filepath.Join("..", "..", "accounts", "testdata", "dupes")
geth := runGeth(t,
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a",
"js", "testdata/empty.js")
defer geth.expectExit()
// Helper for the expect template, returns absolute keystore path.
geth.setTemplateFunc("keypath", func(file string) string {
abs, _ := filepath.Abs(filepath.Join(store, file))
return abs
})
geth.expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Passphrase: {{.InputLine "foobar"}}
Multiple key files exist for address f466859ead1932d743d622cb74fc058882e8648a:
{{keypath "1"}}
{{keypath "2"}}
Testing your passphrase against all of them...
Your passphrase unlocked {{keypath "1"}}
In order to avoid this warning, you need to remove the following duplicate key files:
{{keypath "2"}}
`)
geth.expectExit()
wantMessages := []string{
"Unlocked account f466859ead1932d743d622cb74fc058882e8648a",
}
for _, m := range wantMessages {
if strings.Index(geth.stderrText(), m) == -1 {
t.Errorf("stderr text does not contain %q", m)
}
}
}
func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
store := filepath.Join("..", "..", "accounts", "testdata", "dupes")
geth := runGeth(t,
"--keystore", store, "--nat", "none", "--nodiscover", "--dev",
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
defer geth.expectExit()
// Helper for the expect template, returns absolute keystore path.
geth.setTemplateFunc("keypath", func(file string) string {
abs, _ := filepath.Abs(filepath.Join(store, file))
return abs
})
geth.expect(`
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
!! Unsupported terminal, password will be echoed.
Passphrase: {{.InputLine "wrong"}}
Multiple key files exist for address f466859ead1932d743d622cb74fc058882e8648a:
{{keypath "1"}}
{{keypath "2"}}
Testing your passphrase against all of them...
Fatal: None of the listed files could be unlocked.
`)
geth.expectExit()
}

View File

@ -116,7 +116,7 @@ func exportChain(ctx *cli.Context) {
}
func removeDB(ctx *cli.Context) {
confirm, err := utils.PromptConfirm("Remove local database?")
confirm, err := utils.Stdin.ConfirmPrompt("Remove local database?")
if err != nil {
utils.Fatalf("%v", err)
}

View File

@ -17,7 +17,6 @@
package main
import (
"bufio"
"fmt"
"math/big"
"os"
@ -28,6 +27,7 @@ import (
"strings"
"github.com/codegangsta/cli"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/registrar"
@ -46,30 +46,6 @@ var (
exit = regexp.MustCompile("^\\s*exit\\s*;*\\s*$")
)
type prompter interface {
AppendHistory(string)
Prompt(p string) (string, error)
PasswordPrompt(p string) (string, error)
}
type dumbterm struct{ r *bufio.Reader }
func (r dumbterm) Prompt(p string) (string, error) {
fmt.Print(p)
line, err := r.r.ReadString('\n')
return strings.TrimSuffix(line, "\n"), err
}
func (r dumbterm) PasswordPrompt(p string) (string, error) {
fmt.Println("!! Unsupported terminal, password will echo.")
fmt.Print(p)
input, err := bufio.NewReader(os.Stdin).ReadString('\n')
fmt.Println()
return input, err
}
func (r dumbterm) AppendHistory(string) {}
type jsre struct {
re *re.JSRE
stack *node.Node
@ -78,7 +54,6 @@ type jsre struct {
atexit func()
corsDomain string
client rpc.Client
prompter
}
func makeCompleter(re *jsre) liner.WordCompleter {
@ -106,27 +81,11 @@ func newLightweightJSRE(docRoot string, client rpc.Client, datadir string, inter
js := &jsre{ps1: "> "}
js.wait = make(chan *big.Int)
js.client = client
js.re = re.New(docRoot)
if err := js.apiBindings(); err != nil {
utils.Fatalf("Unable to initialize console - %v", err)
}
if !liner.TerminalSupported() || !interactive {
js.prompter = dumbterm{bufio.NewReader(os.Stdin)}
} else {
lr := liner.NewLiner()
js.withHistory(datadir, func(hist *os.File) { lr.ReadHistory(hist) })
lr.SetCtrlCAborts(true)
lr.SetWordCompleter(makeCompleter(js))
lr.SetTabCompletionStyle(liner.TabPrints)
js.prompter = lr
js.atexit = func() {
js.withHistory(datadir, func(hist *os.File) { hist.Truncate(0); lr.WriteHistory(hist) })
lr.Close()
close(js.wait)
}
}
js.setupInput(datadir)
return js
}
@ -136,30 +95,29 @@ func newJSRE(stack *node.Node, docRoot, corsDomain string, client rpc.Client, in
js.corsDomain = corsDomain
js.wait = make(chan *big.Int)
js.client = client
js.re = re.New(docRoot)
if err := js.apiBindings(); err != nil {
utils.Fatalf("Unable to connect - %v", err)
}
if !liner.TerminalSupported() || !interactive {
js.prompter = dumbterm{bufio.NewReader(os.Stdin)}
} else {
lr := liner.NewLiner()
js.withHistory(stack.DataDir(), func(hist *os.File) { lr.ReadHistory(hist) })
lr.SetCtrlCAborts(true)
lr.SetWordCompleter(makeCompleter(js))
lr.SetTabCompletionStyle(liner.TabPrints)
js.prompter = lr
js.atexit = func() {
js.withHistory(stack.DataDir(), func(hist *os.File) { hist.Truncate(0); lr.WriteHistory(hist) })
lr.Close()
close(js.wait)
}
}
js.setupInput(stack.DataDir())
return js
}
func (self *jsre) setupInput(datadir string) {
self.withHistory(datadir, func(hist *os.File) { utils.Stdin.ReadHistory(hist) })
utils.Stdin.SetCtrlCAborts(true)
utils.Stdin.SetWordCompleter(makeCompleter(self))
utils.Stdin.SetTabCompletionStyle(liner.TabPrints)
self.atexit = func() {
self.withHistory(datadir, func(hist *os.File) {
hist.Truncate(0)
utils.Stdin.WriteHistory(hist)
})
utils.Stdin.Close()
close(self.wait)
}
}
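A minimal sketch, not part of this diff, of how a caller drives the shared utils.Stdin reader that setupInput wires up; it mirrors the interactive loop further down, and the prompt string and evaluate step are placeholders (imports elided, as in the surrounding hunks):

	for {
		line, err := utils.Stdin.Prompt("> ")
		if err != nil {
			if err == liner.ErrPromptAborted { // ctrl-C aborts the current line
				continue
			}
			break // EOF or read error ends the session
		}
		if strings.TrimSpace(line) != "" {
			utils.Stdin.AppendHistory(line)
		}
		// evaluate(line) would run the statement here
	}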
func (self *jsre) batch(statement string) {
err := self.re.EvalAndPrettyPrint(statement)
@ -290,7 +248,7 @@ func (js *jsre) apiBindings() error {
}
func (self *jsre) AskPassword() (string, bool) {
pass, err := self.PasswordPrompt("Passphrase: ")
pass, err := utils.Stdin.PasswordPrompt("Passphrase: ")
if err != nil {
return "", false
}
@ -315,7 +273,7 @@ func (self *jsre) ConfirmTransaction(tx string) bool {
func (self *jsre) UnlockAccount(addr []byte) bool {
fmt.Printf("Please unlock account %x.\n", addr)
pass, err := self.PasswordPrompt("Passphrase: ")
pass, err := utils.Stdin.PasswordPrompt("Passphrase: ")
if err != nil {
return false
}
@ -324,7 +282,8 @@ func (self *jsre) UnlockAccount(addr []byte) bool {
if err := self.stack.Service(&ethereum); err != nil {
return false
}
if err := ethereum.AccountManager().Unlock(common.BytesToAddress(addr), pass); err != nil {
a := accounts.Account{Address: common.BytesToAddress(addr)}
if err := ethereum.AccountManager().Unlock(a, pass); err != nil {
return false
} else {
fmt.Println("Account is now unlocked for this session.")
@ -365,7 +324,7 @@ func (self *jsre) interactive() {
go func() {
defer close(inputln)
for {
line, err := self.Prompt(<-prompt)
line, err := utils.Stdin.Prompt(<-prompt)
if err != nil {
if err == liner.ErrPromptAborted { // ctrl-C
self.resetPrompt()
@ -404,7 +363,7 @@ func (self *jsre) interactive() {
self.setIndent()
if indentCount <= 0 {
if mustLogInHistory(str) {
self.AppendHistory(str[:len(str)-1])
utils.Stdin.AppendHistory(str[:len(str)-1])
}
self.parseInput(str)
str = ""

View File

@ -42,18 +42,17 @@ import (
const (
testSolcPath = ""
solcVersion = "0.9.23"
testKey = "e6fab74a43941f82d89cb7faa408e227cdad3153c4720e540e855c19b15e6674"
testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
testBalance = "10000000000000000000"
testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182"
testBalance = "10000000000000000000"
// of empty string
testHash = "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
)
var (
versionRE = regexp.MustCompile(strconv.Quote(`"compilerVersion":"` + solcVersion + `"`))
testNodeKey = crypto.ToECDSA(common.Hex2Bytes("4b50fa71f5c3eeb8fdc452224b2395af2fcc3d125e06c32c82e048c0559db03f"))
testGenesis = `{"` + testAddress[2:] + `": {"balance": "` + testBalance + `"}}`
versionRE = regexp.MustCompile(strconv.Quote(`"compilerVersion":"` + solcVersion + `"`))
testNodeKey, _ = crypto.HexToECDSA("4b50fa71f5c3eeb8fdc452224b2395af2fcc3d125e06c32c82e048c0559db03f")
testAccount, _ = crypto.HexToECDSA("e6fab74a43941f82d89cb7faa408e227cdad3153c4720e540e855c19b15e6674")
testGenesis = `{"` + testAddress[2:] + `": {"balance": "` + testBalance + `"}}`
)
type testjethre struct {
@ -62,17 +61,6 @@ type testjethre struct {
client *httpclient.HTTPClient
}
func (self *testjethre) UnlockAccount(acc []byte) bool {
var ethereum *eth.Ethereum
self.stack.Service(&ethereum)
err := ethereum.AccountManager().Unlock(common.BytesToAddress(acc), "")
if err != nil {
panic("unable to unlock")
}
return true
}
// Temporary disabled while natspec hasn't been migrated
//func (self *testjethre) ConfirmTransaction(tx string) bool {
// var ethereum *eth.Ethereum
@ -94,17 +82,14 @@ func testREPL(t *testing.T, config func(*eth.Config)) (string, *testjethre, *nod
t.Fatal(err)
}
// Create a networkless protocol stack
stack, err := node.New(&node.Config{PrivateKey: testNodeKey, Name: "test", NoDiscovery: true})
stack, err := node.New(&node.Config{DataDir: tmp, PrivateKey: testNodeKey, Name: "test", NoDiscovery: true})
if err != nil {
t.Fatalf("failed to create node: %v", err)
}
// Initialize and register the Ethereum protocol
keystore := crypto.NewKeyStorePlain(filepath.Join(tmp, "keystore"))
accman := accounts.NewManager(keystore)
accman := accounts.NewPlaintextManager(filepath.Join(tmp, "keystore"))
db, _ := ethdb.NewMemDatabase()
core.WriteGenesisBlockForTesting(db, core.GenesisAccount{common.HexToAddress(testAddress), common.String2Big(testBalance)})
ethConf := &eth.Config{
ChainConfig: &core.ChainConfig{HomesteadBlock: new(big.Int)},
TestGenesisState: db,
@ -122,15 +107,11 @@ func testREPL(t *testing.T, config func(*eth.Config)) (string, *testjethre, *nod
t.Fatalf("failed to register ethereum protocol: %v", err)
}
// Initialize all the keys for testing
keyb, err := crypto.HexToECDSA(testKey)
a, err := accman.ImportECDSA(testAccount, "")
if err != nil {
t.Fatal(err)
}
key := crypto.NewKeyFromECDSA(keyb)
if err := keystore.StoreKey(key, ""); err != nil {
t.Fatal(err)
}
if err := accman.Unlock(key.Address, ""); err != nil {
if err := accman.Unlock(a, ""); err != nil {
t.Fatal(err)
}
// Start the node and assemble the REPL tester

View File

@ -29,7 +29,6 @@ import (
"github.com/codegangsta/cli"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
@ -75,6 +74,8 @@ func init() {
removedbCommand,
dumpCommand,
monitorCommand,
accountCommand,
walletCommand,
{
Action: makedag,
Name: "makedag",
@ -110,144 +111,6 @@ Runs quick benchmark on first GPU found.
The output of this command is supposed to be machine-readable.
`,
},
{
Name: "wallet",
Usage: "ethereum presale wallet",
Subcommands: []cli.Command{
{
Action: importWallet,
Name: "import",
Usage: "import ethereum presale wallet",
},
},
Description: `
geth wallet import /path/to/my/presale.wallet
will prompt for your password and import your ether presale account.
It can be used non-interactively with the --password option taking a
passwordfile as argument containing the wallet password in plaintext.
`},
{
Action: accountList,
Name: "account",
Usage: "manage accounts",
Description: `
Managing accounts lets you create new accounts, list all existing accounts, and
import a private key into a new account.
'help' shows a list of subcommands or help for one subcommand.
It supports interactive mode, where you are prompted for a password, as well as
non-interactive mode where passwords are supplied via a given password file.
Non-interactive mode is only meant for scripted use on test networks or known
safe environments.
Make sure you remember the password you gave when creating a new account (with
either new or import). Without it you are not able to unlock your account.
Note that exporting your key in unencrypted format is NOT supported.
Keys are stored under <DATADIR>/keys.
It is safe to transfer the entire directory or the individual keys therein
between ethereum nodes by simply copying.
Make sure you back up your keys regularly.
In order to use your accounts to send transactions, you need to unlock them using
the '--unlock' option. The argument is a space-separated list of addresses or
indexes. If used non-interactively with a password file, the file should contain
the respective passwords one per line. If you unlock n accounts and the password
file contains fewer than n entries, then the last password applies to
all remaining accounts.
And finally: DO NOT FORGET YOUR PASSWORD.
`,
Subcommands: []cli.Command{
{
Action: accountList,
Name: "list",
Usage: "print account addresses",
},
{
Action: accountCreate,
Name: "new",
Usage: "create a new account",
Description: `
ethereum account new
Creates a new account. Prints the address.
The account is saved in encrypted format; you are prompted for a passphrase.
You must remember this passphrase to unlock your account in the future.
For non-interactive use the passphrase can be specified with the --password flag:
ethereum --password <passwordfile> account new
Note: this is meant to be used for testing only; it is a bad idea to save your
password to a file or expose it in any other way.
`,
},
{
Action: accountUpdate,
Name: "update",
Usage: "update an existing account",
Description: `
ethereum account update <address>
Update an existing account.
The account is saved in the newest version in encrypted format; you are prompted
for a passphrase to unlock the account and another to save the updated file.
This same command can therefore be used to migrate an account of a deprecated
format to the newest format or change the password for an account.
For non-interactive use the passphrase can be specified with the --password flag:
ethereum --password <passwordfile> account update <address>
Since only one password can be given, only a format update can be performed;
changing your password is only possible interactively.
Note that account update has the side effect that the order of your accounts
changes.
`,
},
{
Action: accountImport,
Name: "import",
Usage: "import a private key into a new account",
Description: `
ethereum account import <keyfile>
Imports an unencrypted private key from <keyfile> and creates a new account.
Prints the address.
The keyfile is assumed to contain an unencrypted private key in hexadecimal format.
The account is saved in encrypted format, you are prompted for a passphrase.
You must remember this passphrase to unlock your account in the future.
For non-interactive use the passphrase can be specified with the --password flag:
ethereum --password <passwordfile> account import <keyfile>
Note:
As you can directly copy your encrypted accounts to another ethereum instance,
this import mechanism is not needed when you transfer an account between
nodes.
`,
},
},
},
{
Action: initGenesis,
Name: "init",
@ -289,6 +152,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
`,
},
}
app.Flags = []cli.Flag{
utils.IdentityFlag,
utils.UnlockedAccountFlag,
@ -373,6 +237,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
app.After = func(ctx *cli.Context) error {
logger.Flush()
debug.Exit()
utils.Stdin.Close() // Resets terminal mode.
return nil
}
}
@ -524,25 +389,6 @@ func execScripts(ctx *cli.Context) {
node.Stop()
}
// tries unlocking the specified account a few times.
func unlockAccount(ctx *cli.Context, accman *accounts.Manager, address string, i int, passwords []string) (common.Address, string) {
account, err := utils.MakeAddress(accman, address)
if err != nil {
utils.Fatalf("Unlock error: %v", err)
}
for trials := 0; trials < 3; trials++ {
prompt := fmt.Sprintf("Unlocking account %s | Attempt %d/%d", address, trials+1, 3)
password := getPassPhrase(prompt, false, i, passwords)
if err := accman.Unlock(account, password); err == nil {
return account, password
}
}
// All trials expended to unlock account, bail out
utils.Fatalf("Failed to unlock account: %s", address)
return common.Address{}, ""
}
// startNode boots up the system node and all registered protocols, after which
// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
// miner.
@ -572,106 +418,6 @@ func startNode(ctx *cli.Context, stack *node.Node) {
}
}
func accountList(ctx *cli.Context) {
accman := utils.MakeAccountManager(ctx)
accts, err := accman.Accounts()
if err != nil {
utils.Fatalf("Could not list accounts: %v", err)
}
for i, acct := range accts {
fmt.Printf("Account #%d: %x\n", i, acct)
}
}
// getPassPhrase retrieves the password associated with an account, either fetched
// from a list of preloaded passphrases, or requested interactively from the user.
func getPassPhrase(prompt string, confirmation bool, i int, passwords []string) string {
// If a list of passwords was supplied, retrieve from them
if len(passwords) > 0 {
if i < len(passwords) {
return passwords[i]
}
return passwords[len(passwords)-1]
}
// Otherwise prompt the user for the password
fmt.Println(prompt)
password, err := utils.PromptPassword("Passphrase: ", true)
if err != nil {
utils.Fatalf("Failed to read passphrase: %v", err)
}
if confirmation {
confirm, err := utils.PromptPassword("Repeat passphrase: ", false)
if err != nil {
utils.Fatalf("Failed to read passphrase confirmation: %v", err)
}
if password != confirm {
utils.Fatalf("Passphrases do not match")
}
}
return password
}
// accountCreate creates a new account in the keystore defined by the CLI flags.
func accountCreate(ctx *cli.Context) {
accman := utils.MakeAccountManager(ctx)
password := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
account, err := accman.NewAccount(password)
if err != nil {
utils.Fatalf("Failed to create account: %v", err)
}
fmt.Printf("Address: %x\n", account)
}
// accountUpdate transitions an account from a previous format to the current
// one, also providing the possibility to change the pass-phrase.
func accountUpdate(ctx *cli.Context) {
if len(ctx.Args()) == 0 {
utils.Fatalf("No accounts specified to update")
}
accman := utils.MakeAccountManager(ctx)
account, oldPassword := unlockAccount(ctx, accman, ctx.Args().First(), 0, nil)
newPassword := getPassPhrase("Please give a new password. Do not forget this password.", true, 0, nil)
if err := accman.Update(account, oldPassword, newPassword); err != nil {
utils.Fatalf("Could not update the account: %v", err)
}
}
func importWallet(ctx *cli.Context) {
keyfile := ctx.Args().First()
if len(keyfile) == 0 {
utils.Fatalf("keyfile must be given as argument")
}
keyJson, err := ioutil.ReadFile(keyfile)
if err != nil {
utils.Fatalf("Could not read wallet file: %v", err)
}
accman := utils.MakeAccountManager(ctx)
passphrase := getPassPhrase("", false, 0, utils.MakePasswordList(ctx))
acct, err := accman.ImportPreSaleKey(keyJson, passphrase)
if err != nil {
utils.Fatalf("Could not create the account: %v", err)
}
fmt.Printf("Address: %x\n", acct)
}
func accountImport(ctx *cli.Context) {
keyfile := ctx.Args().First()
if len(keyfile) == 0 {
utils.Fatalf("keyfile must be given as argument")
}
accman := utils.MakeAccountManager(ctx)
passphrase := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
acct, err := accman.Import(keyfile, passphrase)
if err != nil {
utils.Fatalf("Could not create the account: %v", err)
}
fmt.Printf("Address: %x\n", acct)
}
func makedag(ctx *cli.Context) {
args := ctx.Args()
wrongArgs := func() {

290
cmd/geth/run_test.go Normal file
View File

@ -0,0 +1,290 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bufio"
"bytes"
"fmt"
"html/template"
"io"
"io/ioutil"
"os"
"os/exec"
"regexp"
"sync"
"testing"
"time"
)
func tmpdir(t *testing.T) string {
dir, err := ioutil.TempDir("", "geth-test")
if err != nil {
t.Fatal(err)
}
return dir
}
type testgeth struct {
// For total convenience, all testing methods are available.
*testing.T
// template variables for expect
Datadir string
Executable string
Func template.FuncMap
removeDatadir bool
cmd *exec.Cmd
stdout *bufio.Reader
stdin io.WriteCloser
stderr *testlogger
}
func init() {
// Run the app if we're the child process for runGeth.
if os.Getenv("GETH_TEST_CHILD") != "" {
app.RunAndExitOnError()
os.Exit(0)
}
}
// spawns geth with the given command line args. If the args don't set --datadir, the
// child geth gets a temporary data directory.
func runGeth(t *testing.T, args ...string) *testgeth {
tt := &testgeth{T: t, Executable: os.Args[0]}
for i, arg := range args {
if arg == "-datadir" || arg == "--datadir" {
if i < len(args)-1 {
tt.Datadir = args[i+1]
}
break
}
}
if tt.Datadir == "" {
tt.Datadir = tmpdir(t)
tt.removeDatadir = true
args = append([]string{"-datadir", tt.Datadir}, args...)
// Remove the temporary datadir if something fails below.
defer func() {
if t.Failed() {
os.RemoveAll(tt.Datadir)
}
}()
}
// Boot "geth". This actually runs the test binary but the init function
// will prevent any tests from running.
tt.stderr = &testlogger{t: t}
tt.cmd = exec.Command(os.Args[0], args...)
tt.cmd.Env = append(os.Environ(), "GETH_TEST_CHILD=1")
tt.cmd.Stderr = tt.stderr
stdout, err := tt.cmd.StdoutPipe()
if err != nil {
t.Fatal(err)
}
tt.stdout = bufio.NewReader(stdout)
if tt.stdin, err = tt.cmd.StdinPipe(); err != nil {
t.Fatal(err)
}
if err := tt.cmd.Start(); err != nil {
t.Fatal(err)
}
return tt
}
// InputLine writes the given text to the child's stdin.
// This method can also be called from an expect template, e.g.:
//
// geth.expect(`Passphrase: {{.InputLine "password"}}`)
func (tt *testgeth) InputLine(s string) string {
io.WriteString(tt.stdin, s+"\n")
return ""
}
func (tt *testgeth) setTemplateFunc(name string, fn interface{}) {
if tt.Func == nil {
tt.Func = make(map[string]interface{})
}
tt.Func[name] = fn
}
// expect runs its argument as a template, then expects the
// child process to output the result of the template within 5s.
//
// If the template starts with a newline, the newline is removed
// before matching.
func (tt *testgeth) expect(tplsource string) {
// Generate the expected output by running the template.
tpl := template.Must(template.New("").Funcs(tt.Func).Parse(tplsource))
wantbuf := new(bytes.Buffer)
if err := tpl.Execute(wantbuf, tt); err != nil {
panic(err)
}
// Trim exactly one newline at the beginning. This makes tests look
// much nicer because all expect strings are at column 0.
want := bytes.TrimPrefix(wantbuf.Bytes(), []byte("\n"))
if err := tt.matchExactOutput(want); err != nil {
tt.Fatal(err)
}
tt.Logf("Matched stdout text:\n%s", want)
}
func (tt *testgeth) matchExactOutput(want []byte) error {
buf := make([]byte, len(want))
n := 0
tt.withKillTimeout(func() { n, _ = io.ReadFull(tt.stdout, buf) })
buf = buf[:n]
if n < len(want) || !bytes.Equal(buf, want) {
// Grab any additional buffered output in case of mismatch
// because it might help with debugging.
buf = append(buf, make([]byte, tt.stdout.Buffered())...)
tt.stdout.Read(buf[n:])
// Find the mismatch position.
for i := 0; i < n; i++ {
if want[i] != buf[i] {
return fmt.Errorf("Output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s",
buf[:i], buf[i:n], want)
}
}
if n < len(want) {
return fmt.Errorf("Not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s",
buf, want[:n], want[n:])
}
}
return nil
}
// expectRegexp expects the child process to output text matching the
// given regular expression within 5s.
//
// Note that an arbitrary amount of output may be consumed by the
// regular expression. This usually means that expect cannot be used
// after expectRegexp.
func (tt *testgeth) expectRegexp(resource string) (*regexp.Regexp, []string) {
var (
re = regexp.MustCompile(resource)
rtee = &runeTee{in: tt.stdout}
matches []int
)
tt.withKillTimeout(func() { matches = re.FindReaderSubmatchIndex(rtee) })
output := rtee.buf.Bytes()
if matches == nil {
tt.Fatalf("Output did not match:\n---------------- (stdout text)\n%s\n---------------- (regular expression)\n%s",
output, resource)
return re, nil
}
tt.Logf("Matched stdout text:\n%s", output)
var submatch []string
for i := 0; i < len(matches); i += 2 {
submatch = append(submatch, string(output[matches[i]:matches[i+1]]))
}
return re, submatch
}
// expectExit expects the child process to exit within 5s without
// printing any additional text on stdout.
func (tt *testgeth) expectExit() {
var output []byte
tt.withKillTimeout(func() {
output, _ = ioutil.ReadAll(tt.stdout)
})
tt.cmd.Wait()
if tt.removeDatadir {
os.RemoveAll(tt.Datadir)
}
if len(output) > 0 {
tt.Errorf("Unmatched stdout text:\n%s", output)
}
}
func (tt *testgeth) interrupt() {
tt.cmd.Process.Signal(os.Interrupt)
}
// stderrText returns any stderr output written so far.
// The returned text holds all log lines after expectExit has
// returned.
func (tt *testgeth) stderrText() string {
tt.stderr.mu.Lock()
defer tt.stderr.mu.Unlock()
return tt.stderr.buf.String()
}
func (tt *testgeth) withKillTimeout(fn func()) {
timeout := time.AfterFunc(5*time.Second, func() {
tt.Log("killing the child process (timeout)")
tt.cmd.Process.Kill()
if tt.removeDatadir {
os.RemoveAll(tt.Datadir)
}
})
defer timeout.Stop()
fn()
}
// testlogger logs all written lines via t.Log and also
// collects them for later inspection.
type testlogger struct {
t *testing.T
mu sync.Mutex
buf bytes.Buffer
}
func (tl *testlogger) Write(b []byte) (n int, err error) {
lines := bytes.Split(b, []byte("\n"))
for _, line := range lines {
if len(line) > 0 {
tl.t.Logf("(stderr) %s", line)
}
}
tl.mu.Lock()
tl.buf.Write(b)
tl.mu.Unlock()
return len(b), err
}
// runeTee collects text read through it into buf.
type runeTee struct {
in interface {
io.Reader
io.ByteReader
io.RuneReader
}
buf bytes.Buffer
}
func (rtee *runeTee) Read(b []byte) (n int, err error) {
n, err = rtee.in.Read(b)
rtee.buf.Write(b[:n])
return n, err
}
func (rtee *runeTee) ReadRune() (r rune, size int, err error) {
r, size, err = rtee.in.ReadRune()
if err == nil {
rtee.buf.WriteRune(r)
}
return r, size, err
}
func (rtee *runeTee) ReadByte() (b byte, err error) {
b, err = rtee.in.ReadByte()
if err == nil {
rtee.buf.WriteByte(b)
}
return b, err
}
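A hypothetical sketch, not a test from this commit, of the smallest possible use of this harness: runGeth hands the child a fresh temporary datadir, so the keystore should be empty, "account list" should print nothing, and expectExit checks both the clean exit and the absence of stray stdout output:

	func TestAccountListEmptySketch(t *testing.T) {
		geth := runGeth(t, "account", "list")
		geth.expectExit()
	}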

1
cmd/geth/testdata/empty.js vendored Normal file
View File

@ -0,0 +1 @@

6
cmd/geth/testdata/guswallet.json vendored Normal file
View File

@ -0,0 +1,6 @@
{
"encseed": "26d87f5f2bf9835f9a47eefae571bc09f9107bb13d54ff12a4ec095d01f83897494cf34f7bed2ed34126ecba9db7b62de56c9d7cd136520a0427bfb11b8954ba7ac39b90d4650d3448e31185affcd74226a68f1e94b1108e6e0a4a91cdd83eba",
"ethaddr": "d4584b5f6229b7be90727b0fc8c6b91bb427821f",
"email": "gustav.simonsson@gmail.com",
"btcaddr": "1EVknXyFC68kKNLkh6YnKzW41svSRoaAcx"
}

3
cmd/geth/testdata/passwords.txt vendored Normal file
View File

@ -0,0 +1,3 @@
foobar
foobar
foobar

3
cmd/geth/testdata/wrong-passwords.txt vendored Normal file
View File

@ -0,0 +1,3 @@
wrong
wrong
wrong

View File

@ -108,24 +108,23 @@ func MakeSystemNode(keydir string, privkey string, test *tests.BlockTest) (*node
return nil, err
}
// Create the keystore and inject an unlocked account if requested
keystore := crypto.NewKeyStorePassphrase(keydir, crypto.StandardScryptN, crypto.StandardScryptP)
accman := accounts.NewManager(keystore)
accman := accounts.NewPlaintextManager(keydir)
if len(privkey) > 0 {
key, err := crypto.HexToECDSA(privkey)
if err != nil {
return nil, err
}
if err := keystore.StoreKey(crypto.NewKeyFromECDSA(key), ""); err != nil {
a, err := accman.ImportECDSA(key, "")
if err != nil {
return nil, err
}
if err := accman.Unlock(crypto.NewKeyFromECDSA(key).Address, ""); err != nil {
if err := accman.Unlock(a, ""); err != nil {
return nil, err
}
}
// Initialize and register the Ethereum protocol
db, _ := ethdb.NewMemDatabase()
if _, err := test.InsertPreState(db, accman); err != nil {
if _, err := test.InsertPreState(db); err != nil {
return nil, err
}
ethConf := &eth.Config{

View File

@ -18,13 +18,11 @@
package utils
import (
"bufio"
"fmt"
"io"
"os"
"os/signal"
"regexp"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
@ -34,17 +32,12 @@ import (
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rlp"
"github.com/peterh/liner"
)
const (
importBatchSize = 2500
)
var (
interruptCallbacks = []func(os.Signal){}
)
func openLogFile(Datadir string, filename string) *os.File {
path := common.AbsolutePath(Datadir, filename)
file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
@ -54,49 +47,6 @@ func openLogFile(Datadir string, filename string) *os.File {
return file
}
func PromptConfirm(prompt string) (bool, error) {
var (
input string
err error
)
prompt = prompt + " [y/N] "
// if liner.TerminalSupported() {
// fmt.Println("term")
// lr := liner.NewLiner()
// defer lr.Close()
// input, err = lr.Prompt(prompt)
// } else {
fmt.Print(prompt)
input, err = bufio.NewReader(os.Stdin).ReadString('\n')
fmt.Println()
// }
if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
return true, nil
} else {
return false, nil
}
return false, err
}
func PromptPassword(prompt string, warnTerm bool) (string, error) {
if liner.TerminalSupported() {
lr := liner.NewLiner()
defer lr.Close()
return lr.PasswordPrompt(prompt)
}
if warnTerm {
fmt.Println("!! Unsupported terminal, password will be echoed.")
}
fmt.Print(prompt)
input, err := bufio.NewReader(os.Stdin).ReadString('\n')
input = strings.TrimRight(input, "\r\n")
fmt.Println()
return input, err
}
// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.

View File

@ -551,45 +551,36 @@ func MakeDatabaseHandles() int {
// MakeAccountManager creates an account manager from set command line flags.
func MakeAccountManager(ctx *cli.Context) *accounts.Manager {
// Create the keystore crypto primitive, light if requested
scryptN := crypto.StandardScryptN
scryptP := crypto.StandardScryptP
scryptN := accounts.StandardScryptN
scryptP := accounts.StandardScryptP
if ctx.GlobalBool(LightKDFFlag.Name) {
scryptN = crypto.LightScryptN
scryptP = crypto.LightScryptP
scryptN = accounts.LightScryptN
scryptP = accounts.LightScryptP
}
// Assemble an account manager using the configured datadir
var (
datadir = MustMakeDataDir(ctx)
keystoredir = MakeKeyStoreDir(datadir, ctx)
keystore = crypto.NewKeyStorePassphrase(keystoredir, scryptN, scryptP)
)
return accounts.NewManager(keystore)
datadir := MustMakeDataDir(ctx)
keydir := MakeKeyStoreDir(datadir, ctx)
return accounts.NewManager(keydir, scryptN, scryptP)
}
// MakeAddress converts an account specified directly as a hex encoded string or
// a key index in the key store to an internal account representation.
func MakeAddress(accman *accounts.Manager, account string) (a common.Address, err error) {
func MakeAddress(accman *accounts.Manager, account string) (accounts.Account, error) {
// If the specified account is a valid address, return it
if common.IsHexAddress(account) {
return common.HexToAddress(account), nil
return accounts.Account{Address: common.HexToAddress(account)}, nil
}
// Otherwise try to interpret the account as a keystore index
index, err := strconv.Atoi(account)
if err != nil {
return a, fmt.Errorf("invalid account address or index %q", account)
return accounts.Account{}, fmt.Errorf("invalid account address or index %q", account)
}
hex, err := accman.AddressByIndex(index)
if err != nil {
return a, fmt.Errorf("can't get account #%d (%v)", index, err)
}
return common.HexToAddress(hex), nil
return accman.AccountByIndex(index)
}
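An illustrative sketch of the new return type: the same CLI value resolves to an accounts.Account whether it is given as a hex address or as a keystore index; the address below is the fixture used in the unlock test earlier in this change set, and the helper name is made up:

	func resolveExamples(accman *accounts.Manager) (accounts.Account, accounts.Account, error) {
		byAddr, err := MakeAddress(accman, "f466859ead1932d743d622cb74fc058882e8648a")
		if err != nil {
			return accounts.Account{}, accounts.Account{}, err
		}
		byIndex, err := MakeAddress(accman, "0") // first account in the key store
		return byAddr, byIndex, err
	}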
// MakeEtherbase retrieves the etherbase either from the directly specified
// command line flags or from the keystore if CLI indexed.
func MakeEtherbase(accman *accounts.Manager, ctx *cli.Context) common.Address {
accounts, _ := accman.Accounts()
accounts := accman.Accounts()
if !ctx.GlobalIsSet(EtherbaseFlag.Name) && len(accounts) == 0 {
glog.V(logger.Error).Infoln("WARNING: No etherbase set and no accounts found as default")
return common.Address{}
@ -599,11 +590,11 @@ func MakeEtherbase(accman *accounts.Manager, ctx *cli.Context) common.Address {
return common.Address{}
}
// If the specified etherbase is a valid address, return it
addr, err := MakeAddress(accman, etherbase)
account, err := MakeAddress(accman, etherbase)
if err != nil {
Fatalf("Option %q: %v", EtherbaseFlag.Name, err)
}
return addr
return account.Address
}
// MakeMinerExtra resolves extradata for the miner from the set command line flags
@ -615,17 +606,22 @@ func MakeMinerExtra(extra []byte, ctx *cli.Context) []byte {
return extra
}
// MakePasswordList loads up a list of password from a file specified by the
// command line flags.
// MakePasswordList reads password lines from the file specified by --password.
func MakePasswordList(ctx *cli.Context) []string {
if path := ctx.GlobalString(PasswordFileFlag.Name); path != "" {
blob, err := ioutil.ReadFile(path)
if err != nil {
Fatalf("Failed to read password file: %v", err)
}
return strings.Split(string(blob), "\n")
path := ctx.GlobalString(PasswordFileFlag.Name)
if path == "" {
return nil
}
return nil
text, err := ioutil.ReadFile(path)
if err != nil {
Fatalf("Failed to read password file: %v", err)
}
lines := strings.Split(string(text), "\n")
// Sanitise DOS line endings.
for i := range lines {
lines[i] = strings.TrimRight(lines[i], "\r")
}
return lines
}
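A hedged sketch, not part of this diff, of how a list returned by MakePasswordList is typically indexed per account, following the last-password-applies-to-remaining-accounts rule described in the account help text above; the helper name passwordFor is illustrative:

	func passwordFor(passwords []string, i int) (string, bool) {
		if len(passwords) == 0 {
			return "", false // no --password file given, fall back to prompting
		}
		if i < len(passwords) {
			return passwords[i], true
		}
		// Fewer entries than accounts: the last password covers the rest.
		return passwords[len(passwords)-1], true
	}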
// MakeSystemNode sets up a local node, configures the services to launch and

98
cmd/utils/input.go Normal file
View File

@ -0,0 +1,98 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"fmt"
"strings"
"github.com/peterh/liner"
)
// Holds the stdin line reader.
// Only this reader may be used for input because it keeps
// an internal buffer.
var Stdin = newUserInputReader()
type userInputReader struct {
*liner.State
warned bool
supported bool
normalMode liner.ModeApplier
rawMode liner.ModeApplier
}
func newUserInputReader() *userInputReader {
r := new(userInputReader)
// Get the original mode before calling NewLiner.
// This is usually regular "cooked" mode where characters echo.
normalMode, _ := liner.TerminalMode()
// Turn on liner. It switches to raw mode.
r.State = liner.NewLiner()
rawMode, err := liner.TerminalMode()
if err != nil || !liner.TerminalSupported() {
r.supported = false
} else {
r.supported = true
r.normalMode = normalMode
r.rawMode = rawMode
// Switch back to normal mode while we're not prompting.
normalMode.ApplyMode()
}
return r
}
func (r *userInputReader) Prompt(prompt string) (string, error) {
if r.supported {
r.rawMode.ApplyMode()
defer r.normalMode.ApplyMode()
} else {
// liner tries to be smart about printing the prompt
// and doesn't print anything if input is redirected.
// Un-smart it by printing the prompt always.
fmt.Print(prompt)
prompt = ""
defer fmt.Println()
}
return r.State.Prompt(prompt)
}
func (r *userInputReader) PasswordPrompt(prompt string) (passwd string, err error) {
if r.supported {
r.rawMode.ApplyMode()
defer r.normalMode.ApplyMode()
return r.State.PasswordPrompt(prompt)
}
if !r.warned {
fmt.Println("!! Unsupported terminal, password will be echoed.")
r.warned = true
}
// Just as in Prompt, handle printing the prompt here instead of relying on liner.
fmt.Print(prompt)
passwd, err = r.State.Prompt("")
fmt.Println()
return passwd, err
}
func (r *userInputReader) ConfirmPrompt(prompt string) (bool, error) {
prompt = prompt + " [y/N] "
input, err := r.Prompt(prompt)
if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
return true, nil
}
return false, err
}
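A sketch of assumed caller code, not part of input.go: since Stdin is a package-level singleton, commands simply prompt through it, and the application closes it once on shutdown (the utils.Stdin.Close() call added to app.After above) to restore the terminal mode. The prompt strings below are placeholders:

	func confirmThenAsk() (string, error) {
		ok, err := utils.Stdin.ConfirmPrompt("Remove local database?")
		if err != nil || !ok {
			return "", err
		}
		// Echo is suppressed where the terminal supports it.
		return utils.Stdin.PasswordPrompt("Passphrase: ")
	}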

View File

@ -75,8 +75,9 @@ func (self *Jeth) UnlockAccount(call otto.FunctionCall) (response otto.Value) {
// if password is not given or as null value -> ask user for password
if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
fmt.Printf("Unlock account %s\n", account)
if password, err := PromptPassword("Passphrase: ", true); err == nil {
passwd, _ = otto.ToValue(password)
if input, err := Stdin.PasswordPrompt("Passphrase: "); err != nil {
return otto.FalseValue()
passwd, _ = otto.ToValue(input)
} else {
throwJSExeception(err.Error())
}
@ -111,11 +112,11 @@ func (self *Jeth) NewAccount(call otto.FunctionCall) (response otto.Value) {
var passwd string
if len(call.ArgumentList) == 0 {
var err error
passwd, err = PromptPassword("Passphrase: ", true)
passwd, err = Stdin.PasswordPrompt("Passphrase: ")
if err != nil {
return otto.FalseValue()
}
passwd2, err := PromptPassword("Repeat passphrase: ", true)
passwd2, err := Stdin.PasswordPrompt("Repeat passphrase: ")
if err != nil {
return otto.FalseValue()
}

View File

@ -158,8 +158,8 @@ func (be *registryAPIBackend) Call(fromStr, toStr, valueStr, gasStr, gasPriceStr
var from *state.StateObject
if len(fromStr) == 0 {
accounts, err := be.am.Accounts()
if err != nil || len(accounts) == 0 {
accounts := be.am.Accounts()
if len(accounts) == 0 {
from = statedb.GetOrNewStateObject(common.Address{})
} else {
from = statedb.GetOrNewStateObject(accounts[0].Address)
@ -254,8 +254,7 @@ func (be *registryAPIBackend) Transact(fromStr, toStr, nonceStr, valueStr, gasSt
tx = types.NewTransaction(nonce, to, value, gas, price, data)
}
acc := accounts.Account{from}
signature, err := be.am.Sign(acc, tx.SigHash().Bytes())
signature, err := be.am.Sign(from, tx.SigHash().Bytes())
if err != nil {
return "", err
}

View File

@ -17,8 +17,6 @@
package crypto
import (
"crypto/aes"
"crypto/cipher"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
@ -30,7 +28,6 @@ import (
"os"
"encoding/hex"
"encoding/json"
"errors"
"github.com/ethereum/go-ethereum/common"
@ -38,8 +35,6 @@ import (
"github.com/ethereum/go-ethereum/crypto/secp256k1"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
"github.com/pborman/uuid"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/ripemd160"
)
@ -217,120 +212,6 @@ func Decrypt(prv *ecdsa.PrivateKey, ct []byte) ([]byte, error) {
return key.Decrypt(rand.Reader, ct, nil, nil)
}
// Used only by block tests.
func ImportBlockTestKey(privKeyBytes []byte) error {
ks := NewKeyStorePassphrase(common.DefaultDataDir()+"/keystore", LightScryptN, LightScryptP)
ecKey := ToECDSA(privKeyBytes)
key := &Key{
Id: uuid.NewRandom(),
Address: PubkeyToAddress(ecKey.PublicKey),
PrivateKey: ecKey,
}
err := ks.StoreKey(key, "")
return err
}
// creates a Key and stores that in the given KeyStore by decrypting a presale key JSON
func ImportPreSaleKey(keyStore KeyStore, keyJSON []byte, password string) (*Key, error) {
key, err := decryptPreSaleKey(keyJSON, password)
if err != nil {
return nil, err
}
key.Id = uuid.NewRandom()
err = keyStore.StoreKey(key, password)
return key, err
}
func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error) {
preSaleKeyStruct := struct {
EncSeed string
EthAddr string
Email string
BtcAddr string
}{}
err = json.Unmarshal(fileContent, &preSaleKeyStruct)
if err != nil {
return nil, err
}
encSeedBytes, err := hex.DecodeString(preSaleKeyStruct.EncSeed)
iv := encSeedBytes[:16]
cipherText := encSeedBytes[16:]
/*
See https://github.com/ethereum/pyethsaletool
pyethsaletool generates the encryption key from password by
2000 rounds of PBKDF2 with HMAC-SHA-256 using password as salt (:().
16 byte key length within PBKDF2 and resulting key is used as AES key
*/
passBytes := []byte(password)
derivedKey := pbkdf2.Key(passBytes, passBytes, 2000, 16, sha256.New)
plainText, err := aesCBCDecrypt(derivedKey, cipherText, iv)
if err != nil {
return nil, err
}
ethPriv := Keccak256(plainText)
ecKey := ToECDSA(ethPriv)
key = &Key{
Id: nil,
Address: PubkeyToAddress(ecKey.PublicKey),
PrivateKey: ecKey,
}
derivedAddr := hex.EncodeToString(key.Address.Bytes()) // needed because .Hex() gives leading "0x"
expectedAddr := preSaleKeyStruct.EthAddr
if derivedAddr != expectedAddr {
err = fmt.Errorf("decrypted addr '%s' not equal to expected addr '%s'", derivedAddr, expectedAddr)
}
return key, err
}
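With the presale decryption moving out of the crypto package, a caller-side sketch, assuming the manager API used by the importWallet command above; the file path and passphrase are placeholders, and the return type of ImportPreSaleKey is inferred from its use there:

	func importPresale(accman *accounts.Manager) (accounts.Account, error) {
		keyJSON, err := ioutil.ReadFile("/path/to/presale.wallet") // placeholder path
		if err != nil {
			return accounts.Account{}, err
		}
		return accman.ImportPreSaleKey(keyJSON, "presale-passphrase") // placeholder passphrase
	}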
// AES-128 is selected due to size of encryptKey
func aesCTRXOR(key, inText, iv []byte) ([]byte, error) {
aesBlock, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
stream := cipher.NewCTR(aesBlock, iv)
outText := make([]byte, len(inText))
stream.XORKeyStream(outText, inText)
return outText, err
}
func aesCBCDecrypt(key, cipherText, iv []byte) ([]byte, error) {
aesBlock, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
decrypter := cipher.NewCBCDecrypter(aesBlock, iv)
paddedPlaintext := make([]byte, len(cipherText))
decrypter.CryptBlocks(paddedPlaintext, cipherText)
plaintext := PKCS7Unpad(paddedPlaintext)
if plaintext == nil {
err = errors.New("Decryption failed: PKCS7Unpad failed after AES decryption")
}
return plaintext, err
}
// From https://leanpub.com/gocrypto/read#leanpub-auto-block-cipher-modes
func PKCS7Unpad(in []byte) []byte {
if len(in) == 0 {
return nil
}
padding := in[len(in)-1]
if int(padding) > len(in) || padding > aes.BlockSize {
return nil
} else if padding == 0 {
return nil
}
for i := len(in) - 1; i > len(in)-int(padding)-1; i-- {
if in[i] != padding {
return nil
}
}
return in[:len(in)-int(padding)]
}
func PubkeyToAddress(p ecdsa.PublicKey) common.Address {
pubBytes := FromECDSAPub(&p)
return common.BytesToAddress(Keccak256(pubBytes[1:])[12:])

View File

@ -1,209 +0,0 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package crypto
import (
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
)
type KeyStore interface {
// create new key using io.Reader entropy source and optionally using auth string
GenerateNewKey(io.Reader, string) (*Key, error)
GetKey(common.Address, string) (*Key, error) // get key from addr and auth string
GetKeyAddresses() ([]common.Address, error) // get all addresses
StoreKey(*Key, string) error // store key optionally using auth string
DeleteKey(common.Address, string) error // delete key by addr and auth string
Cleanup(keyAddr common.Address) (err error)
}
type keyStorePlain struct {
keysDirPath string
}
func NewKeyStorePlain(path string) KeyStore {
return &keyStorePlain{path}
}
func (ks keyStorePlain) GenerateNewKey(rand io.Reader, auth string) (key *Key, err error) {
return GenerateNewKeyDefault(ks, rand, auth)
}
func GenerateNewKeyDefault(ks KeyStore, rand io.Reader, auth string) (key *Key, err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("GenerateNewKey error: %v", r)
}
}()
key = NewKey(rand)
err = ks.StoreKey(key, auth)
return key, err
}
func (ks keyStorePlain) GetKey(keyAddr common.Address, auth string) (*Key, error) {
keyjson, err := getKeyFile(ks.keysDirPath, keyAddr)
if err != nil {
return nil, err
}
key := new(Key)
if err := json.Unmarshal(keyjson, key); err != nil {
return nil, err
}
return key, nil
}
func (ks keyStorePlain) GetKeyAddresses() (addresses []common.Address, err error) {
return getKeyAddresses(ks.keysDirPath)
}
func (ks keyStorePlain) Cleanup(keyAddr common.Address) (err error) {
return cleanup(ks.keysDirPath, keyAddr)
}
func (ks keyStorePlain) StoreKey(key *Key, auth string) (err error) {
keyJSON, err := json.Marshal(key)
if err != nil {
return
}
err = writeKeyFile(key.Address, ks.keysDirPath, keyJSON)
return
}
func (ks keyStorePlain) DeleteKey(keyAddr common.Address, auth string) (err error) {
return deleteKey(ks.keysDirPath, keyAddr)
}
func deleteKey(keysDirPath string, keyAddr common.Address) (err error) {
var path string
path, err = getKeyFilePath(keysDirPath, keyAddr)
if err == nil {
addrHex := hex.EncodeToString(keyAddr[:])
if path == filepath.Join(keysDirPath, addrHex, addrHex) {
path = filepath.Join(keysDirPath, addrHex)
}
err = os.RemoveAll(path)
}
return
}
func getKeyFilePath(keysDirPath string, keyAddr common.Address) (keyFilePath string, err error) {
addrHex := hex.EncodeToString(keyAddr[:])
matches, err := filepath.Glob(filepath.Join(keysDirPath, fmt.Sprintf("*--%s", addrHex)))
if len(matches) > 0 {
if err == nil {
keyFilePath = matches[len(matches)-1]
}
return
}
keyFilePath = filepath.Join(keysDirPath, addrHex, addrHex)
_, err = os.Stat(keyFilePath)
return
}
func cleanup(keysDirPath string, keyAddr common.Address) (err error) {
fileInfos, err := ioutil.ReadDir(keysDirPath)
if err != nil {
return
}
var paths []string
account := hex.EncodeToString(keyAddr[:])
for _, fileInfo := range fileInfos {
path := filepath.Join(keysDirPath, fileInfo.Name())
if len(path) >= 40 {
addr := path[len(path)-40 : len(path)]
if addr == account {
if path == filepath.Join(keysDirPath, addr, addr) {
path = filepath.Join(keysDirPath, addr)
}
paths = append(paths, path)
}
}
}
if len(paths) > 1 {
for i := 0; err == nil && i < len(paths)-1; i++ {
err = os.RemoveAll(paths[i])
if err != nil {
break
}
}
}
return
}
func getKeyFile(keysDirPath string, keyAddr common.Address) (fileContent []byte, err error) {
var keyFilePath string
keyFilePath, err = getKeyFilePath(keysDirPath, keyAddr)
if err == nil {
fileContent, err = ioutil.ReadFile(keyFilePath)
}
return
}
func writeKeyFile(addr common.Address, keysDirPath string, content []byte) (err error) {
filename := keyFileName(addr)
// read, write and dir search for user
err = os.MkdirAll(keysDirPath, 0700)
if err != nil {
return err
}
// read, write for user
return ioutil.WriteFile(filepath.Join(keysDirPath, filename), content, 0600)
}
// keyFileName implements the naming convention for keyfiles:
// UTC--<created_at UTC ISO8601>-<address hex>
func keyFileName(keyAddr common.Address) string {
ts := time.Now().UTC()
return fmt.Sprintf("UTC--%s--%s", toISO8601(ts), hex.EncodeToString(keyAddr[:]))
}
func toISO8601(t time.Time) string {
var tz string
name, offset := t.Zone()
if name == "UTC" {
tz = "Z"
} else {
tz = fmt.Sprintf("%03d00", offset/3600)
}
return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
}
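A worked example of the naming convention above; the timestamp is made up and the address is taken from cmd/geth/testdata/guswallet.json:

	addr := common.HexToAddress("d4584b5f6229b7be90727b0fc8c6b91bb427821f")
	name := keyFileName(addr)
	// With a creation time of 2016-04-12 15:04:05.999999999 UTC, name would be:
	//   "UTC--2016-04-12T15-04-05.999999999Z--d4584b5f6229b7be90727b0fc8c6b91bb427821f"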
func getKeyAddresses(keysDirPath string) (addresses []common.Address, err error) {
fileInfos, err := ioutil.ReadDir(keysDirPath)
if err != nil {
return nil, err
}
for _, fileInfo := range fileInfos {
filename := fileInfo.Name()
if len(filename) >= 40 {
addr := filename[len(filename)-40 : len(filename)]
address, err := hex.DecodeString(addr)
if err == nil {
addresses = append(addresses, common.BytesToAddress(address))
}
}
}
return addresses, err
}

View File

@ -29,8 +29,6 @@ import (
"sync"
"time"
"golang.org/x/net/context"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
@ -48,7 +46,7 @@ import (
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"gopkg.in/fatih/set.v0"
"golang.org/x/net/context"
)
const defaultGas = uint64(90000)
@ -405,7 +403,7 @@ func NewPublicAccountAPI(am *accounts.Manager) *PublicAccountAPI {
}
// Accounts returns the collection of accounts this node manages
func (s *PublicAccountAPI) Accounts() ([]accounts.Account, error) {
func (s *PublicAccountAPI) Accounts() []accounts.Account {
return s.am.Accounts()
}
@ -421,17 +419,13 @@ func NewPrivateAccountAPI(am *accounts.Manager) *PrivateAccountAPI {
}
// ListAccounts will return a list of addresses for accounts this node manages.
func (s *PrivateAccountAPI) ListAccounts() ([]common.Address, error) {
accounts, err := s.am.Accounts()
if err != nil {
return nil, err
}
func (s *PrivateAccountAPI) ListAccounts() []common.Address {
accounts := s.am.Accounts()
addresses := make([]common.Address, len(accounts))
for i, acc := range accounts {
addresses[i] = acc.Address
}
return addresses, nil
return addresses
}
// NewAccount will create a new account and returns the address for the new account.
@ -446,16 +440,16 @@ func (s *PrivateAccountAPI) NewAccount(password string) (common.Address, error)
// UnlockAccount will unlock the account associated with the given address with
// the given password for duration seconds. If duration is nil it will use a
// default of 300 seconds. It returns an indication if the account was unlocked.
func (s *PrivateAccountAPI) UnlockAccount(addr common.Address, password string, duration *rpc.HexNumber) bool {
func (s *PrivateAccountAPI) UnlockAccount(addr common.Address, password string, duration *rpc.HexNumber) (bool, error) {
if duration == nil {
duration = rpc.NewHexNumber(300)
}
if err := s.am.TimedUnlock(addr, password, time.Duration(duration.Int())*time.Second); err != nil {
glog.V(logger.Info).Infof("%v\n", err)
return false
a := accounts.Account{Address: addr}
d := time.Duration(duration.Int64()) * time.Second
if err := s.am.TimedUnlock(a, password, d); err != nil {
return false, err
}
return true
return true, nil
}
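A hedged sketch of the manager-level call the handler above wraps; the manager, address, and passphrase come from the caller, and the duration mirrors the 300-second default:

	func unlockForFiveMinutes(am *accounts.Manager, addr common.Address, passphrase string) error {
		a := accounts.Account{Address: addr}
		// After the timeout the manager drops the decrypted key again.
		return am.TimedUnlock(a, passphrase, 300*time.Second)
	}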
// LockAccount will lock the account associated with the given address when it's unlocked.
@ -701,8 +695,8 @@ func (s *PublicBlockChainAPI) doCall(args CallArgs, blockNr rpc.BlockNumber) (st
// Retrieve the account state object to interact with
var from *state.StateObject
if args.From == (common.Address{}) {
accounts, err := s.am.Accounts()
if err != nil || len(accounts) == 0 {
accounts := s.am.Accounts()
if len(accounts) == 0 {
from = stateDb.GetOrNewStateObject(common.Address{})
} else {
from = stateDb.GetOrNewStateObject(accounts[0].Address)
@ -912,40 +906,17 @@ func NewPublicTransactionPoolAPI(e *Ethereum, gpo *GasPriceOracle) *PublicTransa
// subscriptionLoop listens for events on the global event mux and creates notifications for subscriptions.
func (s *PublicTransactionPoolAPI) subscriptionLoop() {
sub := s.eventMux.Subscribe(core.TxPreEvent{})
accountTimeout := time.NewTicker(10 * time.Second)
// only publish pending tx signed by one of the accounts in the node
accountSet := set.New()
accounts, _ := s.am.Accounts()
for _, acc := range accounts {
accountSet.Add(acc.Address)
}
for {
select {
case event := <-sub.Chan():
if event == nil {
continue
}
tx := event.Data.(core.TxPreEvent)
if from, err := tx.Tx.FromFrontier(); err == nil {
if accountSet.Has(from) {
s.muPendingTxSubs.Lock()
for id, sub := range s.pendingTxSubs {
if sub.Notify(tx.Tx.Hash()) == rpc.ErrNotificationNotFound {
delete(s.pendingTxSubs, id)
}
for event := range sub.Chan() {
tx := event.Data.(core.TxPreEvent)
if from, err := tx.Tx.FromFrontier(); err == nil {
if s.am.HasAddress(from) {
s.muPendingTxSubs.Lock()
for id, sub := range s.pendingTxSubs {
if sub.Notify(tx.Tx.Hash()) == rpc.ErrNotificationNotFound {
delete(s.pendingTxSubs, id)
}
s.muPendingTxSubs.Unlock()
}
}
case <-accountTimeout.C:
// refresh account list when accounts are added/removed from the node.
if accounts, err := s.am.Accounts(); err == nil {
accountSet.Clear()
for _, acc := range accounts {
accountSet.Add(acc.Address)
}
s.muPendingTxSubs.Unlock()
}
}
}
@ -1115,9 +1086,8 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (ma
}
// sign is a helper function that signs a transaction with the private key of the given address.
func (s *PublicTransactionPoolAPI) sign(address common.Address, tx *types.Transaction) (*types.Transaction, error) {
acc := accounts.Account{address}
signature, err := s.am.Sign(acc, tx.SigHash().Bytes())
func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transaction) (*types.Transaction, error) {
signature, err := s.am.Sign(addr, tx.SigHash().Bytes())
if err != nil {
return nil, err
}
@ -1210,10 +1180,10 @@ func (s *PublicTransactionPoolAPI) SendRawTransaction(encodedTx string) (string,
return tx.Hash().Hex(), nil
}
// Sign signs the given hash using the key that matches the address. The key must be unlocked in order to sign the
// hash.
func (s *PublicTransactionPoolAPI) Sign(address common.Address, hash common.Hash) (string, error) {
signature, error := s.am.Sign(accounts.Account{Address: address}, hash[:])
// Sign signs the given hash using the key that matches the address. The key must be
// unlocked in order to sign the hash.
func (s *PublicTransactionPoolAPI) Sign(addr common.Address, hash common.Hash) (string, error) {
signature, error := s.am.Sign(addr, hash[:])
return common.ToHex(signature), error
}
@ -1358,26 +1328,16 @@ func (s *PublicTransactionPoolAPI) SignTransaction(args SignTransactionArgs) (*S
// PendingTransactions returns the transactions that are in the transaction pool and have a from address that is one of
// the accounts this node manages.
func (s *PublicTransactionPoolAPI) PendingTransactions() ([]*RPCTransaction, error) {
accounts, err := s.am.Accounts()
if err != nil {
return nil, err
}
accountSet := set.New()
for _, account := range accounts {
accountSet.Add(account.Address)
}
func (s *PublicTransactionPoolAPI) PendingTransactions() []*RPCTransaction {
pending := s.txPool.GetTransactions()
transactions := make([]*RPCTransaction, 0)
for _, tx := range pending {
if from, _ := tx.FromFrontier(); accountSet.Has(from) {
from, _ := tx.FromFrontier()
if s.am.HasAddress(from) {
transactions = append(transactions, newRPCPendingTransaction(tx))
}
}
return transactions, nil
return transactions
}
// NewPendingTransaction creates a subscription that is triggered each time a transaction enters the transaction pool
@ -1856,8 +1816,8 @@ func (s *PublicBlockChainAPI) TraceCall(args CallArgs, blockNr rpc.BlockNumber)
// Retrieve the account state object to interact with
var from *state.StateObject
if args.From == (common.Address{}) {
accounts, err := s.am.Accounts()
if err != nil || len(accounts) == 0 {
accounts := s.am.Accounts()
if len(accounts) == 0 {
from = stateDb.GetOrNewStateObject(common.Address{})
} else {
from = stateDb.GetOrNewStateObject(accounts[0].Address)

View File

@ -363,13 +363,13 @@ func (s *Ethereum) ResetWithGenesisBlock(gb *types.Block) {
func (s *Ethereum) Etherbase() (eb common.Address, err error) {
eb = s.etherbase
if (eb == common.Address{}) {
addr, e := s.AccountManager().AddressByIndex(0)
if e != nil {
err = fmt.Errorf("etherbase address must be explicitly specified")
firstAccount, err := s.AccountManager().AccountByIndex(0)
eb = firstAccount.Address
if err != nil {
return eb, fmt.Errorf("etherbase address must be explicitly specified")
}
eb = common.HexToAddress(addr)
}
return
return eb, nil
}
// set in js console via admin interface or wrapper from cli flags

View File

@ -4,6 +4,7 @@
package eth
import (
"crypto/ecdsa"
"crypto/rand"
"math/big"
"sync"
@ -94,10 +95,9 @@ func (p *testTxPool) GetTransactions() types.Transactions {
}
// newTestTransaction create a new dummy transaction.
func newTestTransaction(from *crypto.Key, nonce uint64, datasize int) *types.Transaction {
func newTestTransaction(from *ecdsa.PrivateKey, nonce uint64, datasize int) *types.Transaction {
tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), big.NewInt(100000), big.NewInt(0), make([]byte, datasize))
tx, _ = tx.SignECDSA(from.PrivateKey)
tx, _ = tx.SignECDSA(from)
return tx
}

View File

@ -17,7 +17,6 @@
package eth
import (
"crypto/rand"
"fmt"
"sync"
"testing"
@ -35,7 +34,7 @@ func init() {
// glog.SetV(6)
}
var testAccount = crypto.NewKey(rand.Reader)
var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
// Tests that handshake failures are detected and reported correctly.
func TestStatusMsgErrors61(t *testing.T) { testStatusMsgErrors(t, 61) }

View File

@ -388,7 +388,7 @@ func (self *worker) makeCurrent(parent *types.Block, header *types.Header) error
work.family.Add(ancestor.Hash())
work.ancestors.Add(ancestor.Hash())
}
accounts, _ := self.eth.AccountManager().Accounts()
accounts := self.eth.AccountManager().Accounts()
// Keep track of transactions which return errors so they can be removed
work.remove = set.New()

View File

@ -22,23 +22,18 @@ import (
"fmt"
"io"
"math/big"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rlp"
)
@ -160,45 +155,39 @@ func runBlockTests(homesteadBlock *big.Int, bt map[string]*BlockTest, skipTests
}
return nil
}
func runBlockTest(homesteadBlock *big.Int, test *BlockTest) error {
ks := crypto.NewKeyStorePassphrase(filepath.Join(common.DefaultDataDir(), "keystore"), crypto.StandardScryptN, crypto.StandardScryptP)
am := accounts.NewManager(ks)
db, _ := ethdb.NewMemDatabase()
func runBlockTest(homesteadBlock *big.Int, test *BlockTest) error {
// import pre accounts & construct test genesis block & state root
_, err := test.InsertPreState(db, am)
if err != nil {
db, _ := ethdb.NewMemDatabase()
if _, err := test.InsertPreState(db); err != nil {
return fmt.Errorf("InsertPreState: %v", err)
}
cfg := &eth.Config{
ChainConfig: &core.ChainConfig{HomesteadBlock: homesteadBlock},
TestGenesisState: db,
TestGenesisBlock: test.Genesis,
Etherbase: common.Address{},
AccountManager: am,
PowShared: true,
}
ethereum, err := eth.New(&node.ServiceContext{EventMux: new(event.TypeMux)}, cfg)
core.WriteTd(db, test.Genesis.Hash(), test.Genesis.Difficulty())
core.WriteBlock(db, test.Genesis)
core.WriteCanonicalHash(db, test.Genesis.Hash(), test.Genesis.NumberU64())
core.WriteHeadBlockHash(db, test.Genesis.Hash())
evmux := new(event.TypeMux)
config := &core.ChainConfig{HomesteadBlock: homesteadBlock}
chain, err := core.NewBlockChain(db, config, ethash.NewShared(), evmux)
if err != nil {
return err
}
cm := ethereum.BlockChain()
//vm.Debug = true
validBlocks, err := test.TryBlocksInsert(cm)
validBlocks, err := test.TryBlocksInsert(chain)
if err != nil {
return err
}
lastblockhash := common.HexToHash(test.lastblockhash)
cmlast := cm.LastBlockHash()
cmlast := chain.LastBlockHash()
if lastblockhash != cmlast {
return fmt.Errorf("lastblockhash validation mismatch: want: %x, have: %x", lastblockhash, cmlast)
}
newDB, err := cm.State()
newDB, err := chain.State()
if err != nil {
return err
}
@ -206,21 +195,17 @@ func runBlockTest(homesteadBlock *big.Int, test *BlockTest) error {
return fmt.Errorf("post state validation failed: %v", err)
}
return test.ValidateImportedHeaders(cm, validBlocks)
return test.ValidateImportedHeaders(chain, validBlocks)
}
// InsertPreState populates the given database with the genesis
// accounts defined by the test.
func (t *BlockTest) InsertPreState(db ethdb.Database, am *accounts.Manager) (*state.StateDB, error) {
func (t *BlockTest) InsertPreState(db ethdb.Database) (*state.StateDB, error) {
statedb, err := state.New(common.Hash{}, db)
if err != nil {
return nil, err
}
for addrString, acct := range t.preAccounts {
addr, err := hex.DecodeString(addrString)
if err != nil {
return nil, err
}
code, err := hex.DecodeString(strings.TrimPrefix(acct.Code, "0x"))
if err != nil {
return nil, err
@ -233,16 +218,6 @@ func (t *BlockTest) InsertPreState(db ethdb.Database, am *accounts.Manager) (*st
if err != nil {
return nil, err
}
if acct.PrivateKey != "" {
privkey, err := hex.DecodeString(strings.TrimPrefix(acct.PrivateKey, "0x"))
err = crypto.ImportBlockTestKey(privkey)
err = am.TimedUnlock(common.BytesToAddress(addr), "", 999999*time.Second)
if err != nil {
return nil, err
}
}
obj := statedb.CreateAccount(common.HexToAddress(addrString))
obj.SetCode(code)
obj.SetBalance(balance)