Merge pull request #3226 from karalabe/azure-gethstore

travis, build: implement uploading archives to azure
Felix Lange 2016-11-03 10:41:20 +01:00 committed by GitHub
commit d0c820acd6
60 changed files with 11437 additions and 44 deletions


@@ -6,6 +6,8 @@ matrix:
- os: linux
dist: trusty
go: 1.5.4
env:
- GO15VENDOREXPERIMENT=1
- os: linux
dist: trusty
go: 1.6.2
@@ -15,27 +17,43 @@ matrix:
- os: osx
go: 1.7
# This builder does the PPA upload (and nothing else).
# This builder does the Ubuntu PPA and Linux Azure uploads
- os: linux
dist: trusty
go: 1.7
env: PPA
env:
- ubuntu-ppa
- azure-linux
addons:
apt:
packages:
- gcc-multilib
- devscripts
- debhelper
- dput
script:
- go run build/ci.go debsrc -signer "Felix Lange (Geth CI Testing Key) <fjl@twurst.com>" -upload ppa:lp-fjl/geth-ci-testing
- go run build/ci.go install
- go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go install -arch 386
- go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
# This builder does the OSX Azure uploads
- os: osx
go: 1.7
env:
- azure-osx
script:
- go run build/ci.go install
- go run build/ci.go archive -type zip -signer OSX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go install -arch 386
- go run build/ci.go archive -arch 386 -type zip -signer OSX_SIGNING_KEY -upload gethstore/builds
install:
- go get golang.org/x/tools/cmd/cover
script:
- go run build/ci.go install
- go run build/ci.go test -coverage -vet
after_success:
# - go run build/ci.go archive -type tar
notifications:
webhooks:


@@ -23,12 +23,12 @@ Usage: go run ci.go <command> <command flags/arguments>
Available commands are:
install [ packages... ] -- builds packages and executables
test [ -coverage ] [ -vet ] [ packages... ] -- runs the tests
archive [ -type zip|tar ] -- archives build artefacts
importkeys -- imports signing keys from env
debsrc [ -sign key-id ] [ -upload dest ] -- creates a debian source package
xgo [ options ] -- cross builds according to options
install [-arch architecture] [ packages... ] -- builds packages and executables
test [ -coverage ] [ -vet ] [ packages... ] -- runs the tests
archive [-arch architecture] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artefacts
importkeys -- imports signing keys from env
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
xgo [ options ] -- cross builds according to options
For all commands, -n prevents execution of external programs (dry run mode).
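For example, the Linux Azure builder added to .travis.yml above chains `go run build/ci.go install -arch 386` with `go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds`: it cross-builds the 386 binaries, then signs the resulting tarballs and pushes them to the builds container of the gethstore account.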
@@ -40,6 +40,8 @@ import (
"encoding/base64"
"flag"
"fmt"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
@@ -49,7 +51,7 @@ import (
"strings"
"time"
"../internal/build"
"github.com/ethereum/go-ethereum/internal/build"
)
var (
@@ -130,6 +132,9 @@ func main() {
// Compiling
func doInstall(cmdline []string) {
var (
arch = flag.String("arch", "", "Architecture to cross build for")
)
flag.CommandLine.Parse(cmdline)
env := build.Env()
@@ -146,11 +151,38 @@ func doInstall(cmdline []string) {
if flag.NArg() > 0 {
packages = flag.Args()
}
goinstall := goTool("install", buildFlags(env)...)
if *arch == "" || *arch == runtime.GOARCH {
goinstall := goTool("install", buildFlags(env)...)
goinstall.Args = append(goinstall.Args, "-v")
goinstall.Args = append(goinstall.Args, packages...)
build.MustRun(goinstall)
return
}
// Seems we are cross compiling, work around forbidden GOBIN
goinstall := goToolArch(*arch, "install", buildFlags(env)...)
goinstall.Args = append(goinstall.Args, "-v")
goinstall.Args = append(goinstall.Args, []string{"-buildmode", "archive"}...)
goinstall.Args = append(goinstall.Args, packages...)
build.MustRun(goinstall)
if cmds, err := ioutil.ReadDir("cmd"); err == nil {
for _, cmd := range cmds {
pkgs, err := parser.ParseDir(token.NewFileSet(), filepath.Join(".", "cmd", cmd.Name()), nil, parser.PackageClauseOnly)
if err != nil {
log.Fatal(err)
}
for name, _ := range pkgs {
if name == "main" {
gobuild := goToolArch(*arch, "build", buildFlags(env)...)
gobuild.Args = append(gobuild.Args, "-v")
gobuild.Args = append(gobuild.Args, []string{"-o", filepath.Join(GOBIN, cmd.Name())}...)
gobuild.Args = append(gobuild.Args, "."+string(filepath.Separator)+filepath.Join("cmd", cmd.Name()))
build.MustRun(gobuild)
break
}
}
}
}
}
func buildFlags(env build.Environment) (flags []string) {
@@ -173,13 +205,22 @@ func buildFlags(env build.Environment) (flags []string) {
}
func goTool(subcmd string, args ...string) *exec.Cmd {
return goToolArch(runtime.GOARCH, subcmd, args...)
}
func goToolArch(arch string, subcmd string, args ...string) *exec.Cmd {
gocmd := filepath.Join(runtime.GOROOT(), "bin", "go")
cmd := exec.Command(gocmd, subcmd)
cmd.Args = append(cmd.Args, args...)
cmd.Env = []string{
"GO15VENDOREXPERIMENT=1",
"GOPATH=" + build.GOPATH(),
"GOBIN=" + GOBIN,
}
if arch == "" || arch == runtime.GOARCH {
cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
} else {
cmd.Env = append(cmd.Env, "CGO_ENABLED=1")
cmd.Env = append(cmd.Env, "GOARCH="+arch)
}
for _, e := range os.Environ() {
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
@@ -239,8 +280,11 @@ func doTest(cmdline []string) {
func doArchive(cmdline []string) {
var (
atype = flag.String("type", "zip", "Type of archive to write (zip|tar)")
ext string
arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging")
atype = flag.String("type", "zip", "Type of archive to write (zip|tar)")
signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`)
upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`)
ext string
)
flag.CommandLine.Parse(cmdline)
switch *atype {
@@ -255,18 +299,24 @@ func doArchive(cmdline []string) {
env := build.Env()
maybeSkipArchive(env)
base := archiveBasename(env)
base := archiveBasename(*arch, env)
if err := build.WriteArchive("geth-"+base, ext, gethArchiveFiles); err != nil {
log.Fatal(err)
}
if err := build.WriteArchive("geth-alltools-"+base, ext, allToolsArchiveFiles); err != nil {
log.Fatal(err)
}
for _, archive := range []string{"geth-" + base + ext, "geth-alltools-" + base + ext} {
if err := archiveUpload(archive, *upload, *signer); err != nil {
log.Fatal(err)
}
}
}
func archiveBasename(env build.Environment) string {
func archiveBasename(arch string, env build.Environment) string {
// date := time.Now().UTC().Format("200601021504")
platform := runtime.GOOS + "-" + runtime.GOARCH
platform := runtime.GOOS + "-" + arch
archive := platform + "-" + build.VERSION()
if env.Commit != "" {
archive += "-" + env.Commit[:8]
@@ -274,6 +324,36 @@ func archiveBasename(env build.Environment) string {
return archive
}
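(For illustration: a linux/386 build of a hypothetical version 1.5.4 at this commit would yield the basename linux-386-1.5.4-d0c820ac, so doArchive above writes archives named along the lines of geth-linux-386-1.5.4-d0c820ac.tar.gz and geth-alltools-linux-386-1.5.4-d0c820ac.tar.gz, each gaining a matching .asc signature when -signer is set.)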
func archiveUpload(archive string, blobstore string, signer string) error {
// If signing was requested, generate the signature files
if signer != "" {
pgpkey, err := base64.StdEncoding.DecodeString(os.Getenv(signer))
if err != nil {
return fmt.Errorf("invalid base64 %s", signer)
}
if err := build.PGPSignFile(archive, archive+".asc", string(pgpkey)); err != nil {
return err
}
}
// If uploading to Azure was requested, push the archive possibly with its signature
if blobstore != "" {
auth := build.AzureBlobstoreConfig{
Account: strings.Split(blobstore, "/")[0],
Token: os.Getenv("AZURE_BLOBSTORE_TOKEN"),
Container: strings.SplitN(blobstore, "/", 2)[1],
}
if err := build.AzureBlobstoreUpload(archive, archive, auth); err != nil {
return err
}
if signer != "" {
if err := build.AzureBlobstoreUpload(archive+".asc", archive+".asc", auth); err != nil {
return err
}
}
}
return nil
}
// skips archiving for some build configurations.
func maybeSkipArchive(env build.Environment) {
if env.IsPullRequest {

58
internal/build/azure.go Normal file

@@ -0,0 +1,58 @@
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package build
import (
"os"
"github.com/Azure/azure-sdk-for-go/storage"
)
// AzureBlobstoreConfig is an authentication and configuration struct containing
// the data needed by the Azure SDK to interact with a specific container in the
// blobstore.
type AzureBlobstoreConfig struct {
Account string // Account name to authorize API requests with
Token string // Access token for the above account
Container string // Blob container to upload files into
}
// AzureBlobstoreUpload uploads a local file to Azure Blob Storage. Note, this
// method assumes a maximum file size of 64MB (an Azure limitation); larger files
// would need a multi-API-call upload approach.
//
// See: https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx#Anchor_3
func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig) error {
// Create an authenticated client against the Azure cloud
rawClient, err := storage.NewBasicClient(config.Account, config.Token)
if err != nil {
return err
}
client := rawClient.GetBlobService()
// Stream the file to upload into the designated blobstore container
in, err := os.Open(path)
if err != nil {
return err
}
defer in.Close()
info, err := in.Stat()
if err != nil {
return err
}
return client.CreateBlockBlobFromReader(config.Container, name, uint64(info.Size()), in, nil)
}
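A minimal usage sketch of the new helper, mirroring how archiveUpload in ci.go drives it (the gethstore account / builds container split and the AZURE_BLOBSTORE_TOKEN variable come from the diff above; the archive file name is hypothetical):

```
package main

import (
	"log"
	"os"

	"github.com/ethereum/go-ethereum/internal/build"
)

func main() {
	// Sketch only: upload one archive to the "builds" container of the
	// "gethstore" account, authenticating with the CI-provided token.
	auth := build.AzureBlobstoreConfig{
		Account:   "gethstore",
		Token:     os.Getenv("AZURE_BLOBSTORE_TOKEN"),
		Container: "builds",
	}
	archive := "geth-linux-386-1.5.4-d0c820ac.tar.gz" // hypothetical file name
	if err := build.AzureBlobstoreUpload(archive, archive, auth); err != nil {
		log.Fatal(err)
	}
}
```

Within the 64MB single-call limit noted above, the blob is streamed straight from the local file via CreateBlockBlobFromReader.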

58
internal/build/pgp.go Normal file

@@ -0,0 +1,58 @@
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// signFile reads the contents of an input file and signs it (in armored format)
// with the key provided, placing the signature into the output file.
package build
import (
"bytes"
"fmt"
"os"
"golang.org/x/crypto/openpgp"
)
// PGPSignFile parses a PGP private key from the specified string and writes an
// armored detached signature of the input file into the output file.
//
// Note, this method assumes a single key is contained in the pgpkey argument,
// and that it is in armored format.
func PGPSignFile(input string, output string, pgpkey string) error {
// Parse the keyring and make sure we only have a single private key in it
keys, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(pgpkey))
if err != nil {
return err
}
if len(keys) != 1 {
return fmt.Errorf("key count mismatch: have %d, want %d", len(keys), 1)
}
// Create the input and output streams for signing
in, err := os.Open(input)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(output)
if err != nil {
return err
}
defer out.Close()
// Generate the signature and return
return openpgp.ArmoredDetachSignText(out, keys[0], in, nil)
}
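A matching sketch for the signing half, again following archiveUpload above: the signer environment variable (LINUX_SIGNING_KEY here, as used in .travis.yml) is expected to hold a base64-encoded armored PGP private key, and the archive name is hypothetical:

```
package main

import (
	"encoding/base64"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/internal/build"
)

func main() {
	// Sketch only: decode the armored key from the environment and write a
	// detached .asc signature next to the (hypothetical) archive.
	pgpkey, err := base64.StdEncoding.DecodeString(os.Getenv("LINUX_SIGNING_KEY"))
	if err != nil {
		log.Fatal("invalid base64 LINUX_SIGNING_KEY")
	}
	archive := "geth-linux-386-1.5.4-d0c820ac.tar.gz" // hypothetical file name
	if err := build.PGPSignFile(archive, archive+".asc", string(pgpkey)); err != nil {
		log.Fatal(err)
	}
}
```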


@@ -2,16 +2,17 @@
github.com/ethereum/go-ethereum
# import
github.com/Azure/azure-sdk-for-go v5.0.0-beta-5-gbd73d95
github.com/cespare/cp 165db2f
github.com/davecgh/go-spew v1.0.0-3-g6d21280
github.com/davecgh/go-spew v1.0.0-9-g346938d
github.com/ethereum/ethash v23.1-249-g214d4c0
github.com/fatih/color v1.1.0-4-gbf82308
github.com/gizak/termui d29684e
github.com/golang/snappy d9eb7a3
github.com/hashicorp/golang-lru 0a025b7
github.com/huin/goupnp 97f671e
github.com/huin/goupnp 949b8a7
github.com/jackpal/go-nat-pmp v1.0.1-4-g1fa385a
github.com/mattn/go-colorable v0.0.6-6-g6c903ff
github.com/mattn/go-colorable v0.0.6-7-g6e26b35
github.com/mattn/go-isatty 66b8e73
github.com/mattn/go-runewidth v0.0.1-10-g737072b
github.com/mitchellh/go-wordwrap ad45545
@@ -24,11 +25,11 @@ github.com/robertkrimen/otto bf1c379
github.com/rs/cors v1.0
github.com/rs/xhandler v1.0-1-ged27b6f
github.com/syndtr/goleveldb 6b4daa5
golang.org/x/crypto ca7e7f1
golang.org/x/net b336a97
golang.org/x/crypto 9477e0b
golang.org/x/net 4bb47a1
golang.org/x/sys c200b10
golang.org/x/text a8b3843
golang.org/x/tools 0db92ca
golang.org/x/tools 1529f88
gopkg.in/check.v1 4f90aea
gopkg.in/fatih/set.v0 v0.1.0-3-g27c4092
gopkg.in/karalabe/cookiejar.v2 8dcd6a7

32
vendor/github.com/Azure/azure-sdk-for-go/.gitignore generated vendored Normal file

@@ -0,0 +1,32 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# Editor swap files
*.swp
*~
.DS_Store
# ignore vendor/
vendor/

33
vendor/github.com/Azure/azure-sdk-for-go/.travis.yml generated vendored Normal file

@@ -0,0 +1,33 @@
sudo: false
language: go
go: 1.7
install:
- go get -u github.com/golang/lint/golint
- go get -u github.com/Masterminds/glide
- go get -u golang.org/x/net/context
- go get -u github.com/HewlettPackard/gas
- go get -u gopkg.in/godo.v2/cmd/godo
- export GO15VENDOREXPERIMENT=1
- glide install
script:
- gas -skip=management/examples/*.go -skip=*vendor* -skip=Gododir/* ./... | tee gas-scan.txt
- test -z "$(grep 'Severity:\s*HIGH' gas-scan.txt)"
- test -z "$(gofmt -s -l $(find ./arm/* -type d -print) | tee /dev/stderr)"
- test -z "$(gofmt -s -l -w management | tee /dev/stderr)"
- test -z "$(gofmt -s -l -w storage | tee /dev/stderr)"
- test -z "$(gofmt -s -l -w Gododir | tee /dev/stderr)"
- test -z "$(go build $(find ./* -type d -print | grep -v '^./vendor$') | tee /dev/stderr)"
- test -z "$(go vet $(find ./arm/* -type d -print) | tee /dev/stderr)"
- test -z "$(golint ./arm/... | tee /dev/stderr)"
- go test -v ./storage/... -check.v
- test -z "$(golint ./storage/... | tee /dev/stderr)"
- go vet ./storage/...
- go test -v ./management/...
- test -z "$(golint ./management/... | grep -v 'should have comment' | grep -v 'stutters' | tee /dev/stderr)"
- go vet ./management/...
- test -z "$(golint ./Gododir/... | tee /dev/stderr)"
- go vet ./Gododir/...

280
vendor/github.com/Azure/azure-sdk-for-go/CHANGELOG.md generated vendored Normal file

@@ -0,0 +1,280 @@
# CHANGELOG
-----
## `v6.0.0-beta`
| api | version | note |
|:-------------------------------|:-------------------|:-----------------------------------|
| arm/authorization | no change | code refactoring |
| arm/batch | no change | code refactoring |
| arm/compute | no change | code refactoring |
| arm/containerservice | 2016-03-30 | return |
| arm/datalake-analytics/account | 2015-10-01-preview | new |
| arm/datalake-store/filesystem | no change | moved to datalake-store/filesystem |
| arm/eventhub | no change | code refactoring |
| arm/intune | no change | code refactoring |
| arm/iothub | no change | code refactoring |
| arm/keyvault | no change | code refactoring |
| arm/mediaservices | no change | code refactoring |
| arm/network | no change | code refactoring |
| arm/notificationhubs | no change | code refactoring |
| arm/redis | no change | code refactoring |
| arm/resources/resources | no change | code refactoring |
| arm/resources/links | 2016-09-01 | new |
| arm/resources/locks | 2016-09-01 | updated |
| arm/resources/policy | no change | code refactoring |
| arm/resources/resources | 2016-09-01 | updated |
| arm/servermanagement | 2016-07-01-preview | updated |
| arm/web | no change | code refactoring |
- storage: Added blob lease functionality and tests
## `v5.0.0-beta`
| api | version | note |
|:------------------------------|:--------------------|:-----------------|
| arm/network | 2016-09-01 | updated |
| arm/servermanagement | 2015-07-01-preview | new |
| arm/eventhub | 2015-08-01 | new |
| arm/containerservice | -- | removed |
| arm/resources/subscriptions | no change | code refactoring |
| arm/resources/features | no change | code refactoring |
| arm/resources/resources | no change | code refactoring |
| arm/datalake-store/accounts | no change | code refactoring |
| arm/datalake-store/filesystem | no change | code refactoring |
| arm/notificationhubs | no change | code refactoring |
| arm/redis | no change | code refactoring |
- storage: Add more file storage share operations.
- azure-rest-api-specs/commit/b8cdc2c50a0872fc0039f20c2b6b33aa0c2af4bf
- Uses go-autorest v7.2.1
## `v4.0.0-beta`
- arm/logic: breaking change in package logic.
- arm: parameter validation code added in all arm packages.
- Uses go-autorest v7.2.0.
## `v3.2.0-beta`
| api | version | note |
|:----------------------------|:--------------------|:----------|
| arm/mediaservices | 2015-10-01 | new |
| arm/keyvault | 2015-06-01 | new |
| arm/iothub | 2016-02-03 | new |
| arm/datalake-store | 2015-12-01 | new |
| arm/network | 2016-06-01 | updated |
| arm/resources/resources | 2016-07-01 | updated |
| arm/resources/policy | 2016-04-01 | updated |
| arm/servicebus | 2015-08-01 | updated |
- arm: uses go-autorest version v7.1.0.
- storage: fix for operating on blobs names containing special characters.
- storage: add SetBlobProperties(), update BlobProperties response fields.
- storage: make storage client work correctly with read-only secondary account.
- storage: add Azure Storage Emulator support.
## `v3.1.0-beta`
- Added a new arm/compute/containerservice (2016-03-30) package
- Reintroduced NewxxClientWithBaseURI method.
- Uses go-autorest version - v7.0.7.
## `v3.0.0-beta`
This release brings the Go SDK ARM packages up-to-date with Azure ARM Swagger files for most
services. Since the underlying [Swagger files](https://github.com/Azure/azure-rest-api-specs)
continue to change substantially, the ARM packages are still in *beta* status.
The ARM packages now align with the following API versions (*highlighted* packages are new or
updated in this release):
| api | version | note |
|:----------------------------|:--------------------|:----------|
| arm/authorization | 2015-07-01 | no change |
| arm/intune | 2015-01-14-preview | no change |
| arm/notificationhubs | 2014-09-01 | no change |
| arm/resources/features | 2015-12-01 | no change |
| arm/resources/subscriptions | 2015-11-01 | no change |
| arm/web | 2015-08-01 | no change |
| arm/cdn | 2016-04-02 | updated |
| arm/compute | 2016-03-30 | updated |
| arm/dns | 2016-04-01 | updated |
| arm/logic | 2015-08-01-preview | updated |
| arm/network | 2016-03-30 | updated |
| arm/redis | 2016-04-01 | updated |
| arm/resources/resources | 2016-02-01 | updated |
| arm/resources/policy | 2015-10-01-preview | updated |
| arm/resources/locks | 2015-01-01 | updated (resources/authorization earlier)|
| arm/scheduler | 2016-03-01 | updated |
| arm/storage | 2016-01-01 | updated |
| arm/search | 2015-02-28 | updated |
| arm/batch | 2015-12-01 | new |
| arm/cognitiveservices | 2016-02-01-preview | new |
| arm/devtestlabs | 2016-05-15 | new |
| arm/machinelearning | 2016-05-01-preview | new |
| arm/powerbiembedded | 2016-01-29 | new |
| arm/mobileengagement | 2014-12-01 | new |
| arm/servicebus | 2014-09-01 | new |
| arm/sql | 2015-05-01 | new |
| arm/trafficmanager | 2015-11-01 | new |
Below are some design changes.
- Removed Api version from method arguments.
- Removed New...ClientWithBaseURI() method in all clients. BaseURI value is set in client.go.
- Uses go-autorest version v7.0.6.
## `v2.2.0-beta`
- Uses go-autorest version v7.0.5.
- Update version of packages "jwt-go" and "crypto" in glide.lock.
## `v2.1.1-beta`
- arm: Better error messages for long running operation failures (Uses go-autorest version v7.0.4).
## `v2.1.0-beta`
- arm: Uses go-autorest v7.0.3 (polling related updates).
- arm: Cancel channel argument added in long-running calls.
- storage: Allow caller to provide headers for DeleteBlob methods.
- storage: Enables connection sharing with http keepalive.
- storage: Add BlobPrefixes and Delimiter to BlobListResponse
## `v2.0.0-beta`
- Uses go-autorest v6.0.0 (Polling and Asynchronous requests related changes).
## `v0.5.0-beta`
Updated following packages to new API versions:
- arm/resources/features 2015-12-01
- arm/resources/resources 2015-11-01
- arm/resources/subscriptions 2015-11-01
### Changes
- SDK now uses go-autorest v3.0.0.
## `v0.4.0-beta`
This release brings the Go SDK ARM packages up-to-date with Azure ARM Swagger files for most
services. Since the underlying [Swagger files](https://github.com/Azure/azure-rest-api-specs)
continue to change substantially, the ARM packages are still in *beta* status.
The ARM packages now align with the following API versions (*highlighted* packages are new or
updated in this release):
- *arm/authorization 2015-07-01*
- *arm/cdn 2015-06-01*
- arm/compute 2015-06-15
- arm/dns 2015-05-04-preview
- *arm/intune 2015-01-14-preview*
- arm/logic 2015-02-01-preview
- *arm/network 2015-06-15*
- *arm/notificationhubs 2014-09-01*
- arm/redis 2015-08-01
- *arm/resources/authorization 2015-01-01*
- *arm/resources/features 2014-08-01-preview*
- *arm/resources/resources 2014-04-01-preview*
- *arm/resources/subscriptions 2014-04-01-preview*
- *arm/scheduler 2016-01-01*
- arm/storage 2015-06-15
- arm/web 2015-08-01
### Changes
- Moved the arm/authorization, arm/features, arm/resources, and arm/subscriptions packages under a new, resources, package (to reflect the corresponding Swagger structure)
- Added a new arm/authorization (2015-07-01) package
- Added a new arm/cdn (2015-06-01) package
- Added a new arm/intune (2015-01-14-preview) package
- Updated arm/network (2015-06-01)
- Added a new arm/notificationhubs (2014-09-01) package
- Updated arm/scheduler (2016-01-01) package
-----
## `v0.3.0-beta`
- Corrected unintentional struct field renaming and client renaming in v0.2.0-beta
-----
## `v0.2.0-beta`
- Added support for DNS, Redis, and Web site services
- Updated Storage service to API version 2015-06-15
- Updated Network to include routing table support
- Address https://github.com/Azure/azure-sdk-for-go/issues/232
- Address https://github.com/Azure/azure-sdk-for-go/issues/231
- Address https://github.com/Azure/azure-sdk-for-go/issues/230
- Address https://github.com/Azure/azure-sdk-for-go/issues/224
- Address https://github.com/Azure/azure-sdk-for-go/issues/184
- Address https://github.com/Azure/azure-sdk-for-go/issues/183
------
## `v0.1.1-beta`
- Improves the UserAgent string to disambiguate arm packages from others in the SDK
- Improves setting the http.Response into generated results (reduces likelihood of a nil reference)
- Adds gofmt, golint, and govet to Travis CI for the arm packages
##### Fixed Issues
- https://github.com/Azure/azure-sdk-for-go/issues/196
- https://github.com/Azure/azure-sdk-for-go/issues/213
------
## v0.1.0-beta
This release addresses the issues raised against the alpha release and adds more features. Most
notably, to address the challenges of encoding JSON
(see the [comments](https://github.com/Azure/go-autorest#handling-empty-values) in the
[go-autorest](https://github.com/Azure/go-autorest) package) by using pointers for *all* structure
fields (with the exception of enumerations). The
[go-autorest/autorest/to](https://github.com/Azure/go-autorest/tree/master/autorest/to) package
provides helpers to convert to / from pointers. The examples demonstrate their usage.
Additionally, the packages now align with Go coding standards and pass both `golint` and `govet`.
Accomplishing this required renaming various fields and parameters (such as changing Url to URL).
##### Changes
- Changed request / response structures to use pointer fields.
- Changed methods to return `error` instead of `autorest.Error`.
- Re-divided methods to ease asynchronous requests.
- Added paged results support.
- Added a UserAgent string.
- Added changes necessary to pass golint and govet.
- Updated README.md with details on asynchronous requests and paging.
- Saved package dependencies through Godep (for the entire SDK).
##### Fixed Issues:
- https://github.com/Azure/azure-sdk-for-go/issues/205
- https://github.com/Azure/azure-sdk-for-go/issues/206
- https://github.com/Azure/azure-sdk-for-go/issues/211
- https://github.com/Azure/azure-sdk-for-go/issues/212
-----
## v0.1.0-alpha
This release introduces the Azure Resource Manager packages generated from the corresponding
[Swagger API](http://swagger.io) [definitions](https://github.com/Azure/azure-rest-api-specs).

202
vendor/github.com/Azure/azure-sdk-for-go/LICENSE generated vendored Normal file

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

102
vendor/github.com/Azure/azure-sdk-for-go/README.md generated vendored Normal file

@@ -0,0 +1,102 @@
# Microsoft Azure SDK for Go
This project provides various Go packages to perform operations
on Microsoft Azure REST APIs.
[![GoDoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go) [![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go)
> **NOTE:** This repository is under heavy ongoing development and
is likely to break over time. We currently do not have any releases
yet. If you are planning to use the repository, please consider vendoring
the packages in your project and update them when a stable tag is out.
# Packages
## Azure Resource Manager (ARM)
[About ARM](/arm/README.md)
- [authorization](/arm/authorization)
- [batch](/arm/batch)
- [cdn](/arm/cdn)
- [cognitiveservices](/arm/cognitiveservices)
- [compute](/arm/compute)
- [containerservice](/arm/containerservice)
- [datalake-store](/arm/datalake-store)
- [devtestlabs](/arm/devtestlabs)
- [dns](/arm/dns)
- [intune](/arm/intune)
- [iothub](/arm/iothub)
- [keyvault](/arm/keyvault)
- [logic](/arm/logic)
- [machinelearning](/arm/machinelearning)
- [mediaservices](/arm/mediaservices)
- [mobileengagement](/arm/mobileengagement)
- [network](/arm/network)
- [notificationhubs](/arm/notificationhubs)
- [powerbiembedded](/arm/powerbiembedded)
- [redis](/arm/redis)
- [resources](/arm/resources)
- [scheduler](/arm/scheduler)
- [search](/arm/search)
- [servicebus](/arm/servicebus)
- [sql](/arm/sql)
- [storage](/arm/storage)
- [trafficmanager](/arm/trafficmanager)
- [web](/arm/web)
## Azure Service Management (ASM), aka classic deployment
[About ASM](/management/README.md)
- [affinitygroup](/management/affinitygroup)
- [hostedservice](/management/hostedservice)
- [location](/management/location)
- [networksecuritygroup](/management/networksecuritygroup)
- [osimage](/management/osimage)
- [sql](/management/sql)
- [storageservice](/management/storageservice)
- [virtualmachine](/management/virtualmachine)
- [virtualmachinedisk](/management/virtualmachinedisk)
- [virtualmachineimage](/management/virtualmachineimage)
- [virtualnetwork](/management/virtualnetwork)
- [vmutils](/management/vmutils)
## Azure Storage SDK for Go
[About Storage](/storage/README.md)
- [storage](/storage)
# Installation
- [Install Go 1.7](https://golang.org/dl/).
- Go get the SDK:
```
$ go get -d github.com/Azure/azure-sdk-for-go
```
> **IMPORTANT:** We highly suggest vendoring Azure SDK for Go as a dependency. For vendoring dependencies, Azure SDK for Go uses [glide](https://github.com/Masterminds/glide). If you haven't already, install glide. Navigate to your project directory and install the dependencies.
```
$ cd your/project
$ glide create
$ glide install
```
# Documentation
Read the Godoc of the repository at [Godoc.org](http://godoc.org/github.com/Azure/azure-sdk-for-go/).
# Contribute
If you would like to become an active contributor to this project please follow the instructions provided in [Microsoft Azure Projects Contribution Guidelines](http://azure.github.io/guidelines/).
# License
This project is published under [Apache 2.0 License](LICENSE).
-----
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

53
vendor/github.com/Azure/azure-sdk-for-go/glide.lock generated vendored Normal file

@@ -0,0 +1,53 @@
hash: 7407050cee9bb9ce89e23ef26bce4051cce63d558338a4937f027a18b789e3a1
updated: 2016-10-25T11:34:48.4987356-07:00
imports:
- name: github.com/Azure/go-autorest
version: 0781901f19f1e7db3034d97ec57af753db0bf808
subpackages:
- autorest
- autorest/azure
- autorest/date
- autorest/to
- autorest/validation
- name: github.com/dgrijalva/jwt-go
version: 24c63f56522a87ec5339cc3567883f1039378fdb
- name: github.com/howeyc/gopass
version: f5387c492211eb133053880d23dfae62aa14123d
- name: github.com/mattn/go-colorable
version: 6c903ff4aa50920ca86087a280590b36b3152b9c
- name: github.com/mattn/go-isatty
version: 66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8
- name: github.com/mgutz/ansi
version: c286dcecd19ff979eeb73ea444e479b903f2cfcb
- name: github.com/mgutz/minimist
version: 39eb8cf573ca29344bd7d7e6ba4d7febdebd37a9
- name: github.com/mgutz/str
version: 968bf66e3da857419e4f6e71b2d5c9ae95682dc4
- name: github.com/mgutz/to
version: 2a0bcba0661696e339461f5efb2273f4459dd1b9
- name: github.com/MichaelTJones/walk
version: 3af09438b0ab0e8f296410bfa646a7e635ea1fc0
- name: github.com/nozzle/throttler
version: d9b45f19996c645d38c9266d1f5cf1990e930119
- name: github.com/satori/uuid
version: b061729afc07e77a8aa4fad0a2fd840958f1942a
- name: golang.org/x/crypto
version: 84e98f45760e87786b7f24603b8166a6fa09811d
subpackages:
- pkcs12
- pkcs12/internal/rc2
- ssh/terminal
- name: golang.org/x/sys
version: c200b10b5d5e122be351b67af224adc6128af5bf
subpackages:
- unix
- name: gopkg.in/check.v1
version: 4f90aeace3a26ad7021961c297b22c42160c7b25
- name: gopkg.in/godo.v2
version: b5fd2f0bef1ebe832e628cfad18ab1cc707f65a1
subpackages:
- glob
- util
- watcher
- watcher/fswatch
testImports: []

12
vendor/github.com/Azure/azure-sdk-for-go/glide.yaml generated vendored Normal file

@@ -0,0 +1,12 @@
package: github.com/Azure/azure-sdk-for-go
import:
- package: github.com/Azure/go-autorest
subpackages:
- /autorest
- autorest/azure
- autorest/date
- autorest/to
- package: golang.org/x/crypto
subpackages:
- /pkcs12
- package: gopkg.in/check.v1


@@ -0,0 +1,5 @@
# Azure Storage SDK for Go
The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package.
This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)
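For orientation, a minimal sketch (not part of the vendored code) of creating a client against the local Storage Emulator with this package, using the constants and constructors defined in client.go further below:

```
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// NewBasicClient recognises the emulator account name and wires the client
	// to the local emulator endpoints (e.g. 127.0.0.1:10000 for the blob service).
	client, err := storage.NewBasicClient(storage.StorageEmulatorAccountName, storage.StorageEmulatorAccountKey)
	if err != nil {
		log.Fatal(err)
	}
	blobs := client.GetBlobService()
	_ = blobs // a BlobStorageClient, ready for blob operations
}
```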

1278
vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go generated vendored Normal file

File diff suppressed because it is too large


@@ -0,0 +1,551 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage
import (
"bytes"
"encoding/base64"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
)
const (
// DefaultBaseURL is the domain name used for storage requests when a
// default client is created.
DefaultBaseURL = "core.windows.net"
// DefaultAPIVersion is the Azure Storage API version string used when a
// basic client is created.
DefaultAPIVersion = "2015-02-21"
defaultUseHTTPS = true
// StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
StorageEmulatorAccountName = "devstoreaccount1"
// StorageEmulatorAccountKey is the fixed storage account key used by Azure Storage Emulator
StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
blobServiceName = "blob"
tableServiceName = "table"
queueServiceName = "queue"
fileServiceName = "file"
storageEmulatorBlob = "127.0.0.1:10000"
storageEmulatorTable = "127.0.0.1:10002"
storageEmulatorQueue = "127.0.0.1:10001"
)
// Client is the object that needs to be constructed to perform
// operations on the storage account.
type Client struct {
// HTTPClient is the http.Client used to initiate API
// requests. If it is nil, http.DefaultClient is used.
HTTPClient *http.Client
accountName string
accountKey []byte
useHTTPS bool
baseURL string
apiVersion string
}
type storageResponse struct {
statusCode int
headers http.Header
body io.ReadCloser
}
type odataResponse struct {
storageResponse
odata odataErrorMessage
}
// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
type AzureStorageServiceError struct {
Code string `xml:"Code"`
Message string `xml:"Message"`
AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
QueryParameterName string `xml:"QueryParameterName"`
QueryParameterValue string `xml:"QueryParameterValue"`
Reason string `xml:"Reason"`
StatusCode int
RequestID string
}
type odataErrorMessageMessage struct {
Lang string `json:"lang"`
Value string `json:"value"`
}
type odataErrorMessageInternal struct {
Code string `json:"code"`
Message odataErrorMessageMessage `json:"message"`
}
type odataErrorMessage struct {
Err odataErrorMessageInternal `json:"odata.error"`
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
allowed []int
got int
}
func (e UnexpectedStatusCodeError) Error() string {
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
got := s(e.got)
expected := []string{}
for _, v := range e.allowed {
expected = append(expected, s(v))
}
return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or "))
}
// Got is the actual status code returned by Azure.
func (e UnexpectedStatusCodeError) Got() int {
return e.got
}
// NewBasicClient constructs a Client with given storage service name and
// key.
func NewBasicClient(accountName, accountKey string) (Client, error) {
if accountName == StorageEmulatorAccountName {
return NewEmulatorClient()
}
return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
}
//NewEmulatorClient constructs a Client intended to only work with Azure
//Storage Emulator
func NewEmulatorClient() (Client, error) {
return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
}
// NewClient constructs a Client. This should be used if the caller wants
// to specify whether to use HTTPS, a specific REST API version or a custom
// storage endpoint other than Azure Public Cloud.
func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
var c Client
if accountName == "" {
return c, fmt.Errorf("azure: account name required")
} else if accountKey == "" {
return c, fmt.Errorf("azure: account key required")
} else if blobServiceBaseURL == "" {
return c, fmt.Errorf("azure: base storage service url required")
}
key, err := base64.StdEncoding.DecodeString(accountKey)
if err != nil {
return c, fmt.Errorf("azure: malformed storage account key: %v", err)
}
return Client{
accountName: accountName,
accountKey: key,
useHTTPS: useHTTPS,
baseURL: blobServiceBaseURL,
apiVersion: apiVersion,
}, nil
}
func (c Client) getBaseURL(service string) string {
scheme := "http"
if c.useHTTPS {
scheme = "https"
}
host := ""
if c.accountName == StorageEmulatorAccountName {
switch service {
case blobServiceName:
host = storageEmulatorBlob
case tableServiceName:
host = storageEmulatorTable
case queueServiceName:
host = storageEmulatorQueue
}
} else {
host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
}
u := &url.URL{
Scheme: scheme,
Host: host}
return u.String()
}
func (c Client) getEndpoint(service, path string, params url.Values) string {
u, err := url.Parse(c.getBaseURL(service))
if err != nil {
// really should not be happening
panic(err)
}
// API doesn't accept path segments not starting with '/'
if !strings.HasPrefix(path, "/") {
path = fmt.Sprintf("/%v", path)
}
if c.accountName == StorageEmulatorAccountName {
path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
}
u.Path = path
u.RawQuery = params.Encode()
return u.String()
}
// GetBlobService returns a BlobStorageClient which can operate on the blob
// service of the storage account.
func (c Client) GetBlobService() BlobStorageClient {
return BlobStorageClient{c}
}
// GetQueueService returns a QueueServiceClient which can operate on the queue
// service of the storage account.
func (c Client) GetQueueService() QueueServiceClient {
return QueueServiceClient{c}
}
// GetTableService returns a TableServiceClient which can operate on the table
// service of the storage account.
func (c Client) GetTableService() TableServiceClient {
return TableServiceClient{c}
}
// GetFileService returns a FileServiceClient which can operate on the file
// service of the storage account.
func (c Client) GetFileService() FileServiceClient {
return FileServiceClient{c}
}
func (c Client) createAuthorizationHeader(canonicalizedString string) string {
signature := c.computeHmac256(canonicalizedString)
return fmt.Sprintf("%s %s:%s", "SharedKey", c.getCanonicalizedAccountName(), signature)
}
func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) {
canonicalizedResource, err := c.buildCanonicalizedResource(url)
if err != nil {
return "", err
}
canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource)
return c.createAuthorizationHeader(canonicalizedString), nil
}
func (c Client) getStandardHeaders() map[string]string {
return map[string]string{
"x-ms-version": c.apiVersion,
"x-ms-date": currentTimeRfc1123Formatted(),
}
}
func (c Client) getCanonicalizedAccountName() string {
// since we may be trying to access a secondary storage account, we need to
// remove the -secondary part of the storage name
return strings.TrimSuffix(c.accountName, "-secondary")
}
func (c Client) buildCanonicalizedHeader(headers map[string]string) string {
cm := make(map[string]string)
for k, v := range headers {
headerName := strings.TrimSpace(strings.ToLower(k))
match, _ := regexp.MatchString("x-ms-", headerName)
if match {
cm[headerName] = v
}
}
if len(cm) == 0 {
return ""
}
keys := make([]string, 0, len(cm))
for key := range cm {
keys = append(keys, key)
}
sort.Strings(keys)
ch := ""
for i, key := range keys {
if i == len(keys)-1 {
ch += fmt.Sprintf("%s:%s", key, cm[key])
} else {
ch += fmt.Sprintf("%s:%s\n", key, cm[key])
}
}
return ch
}
func (c Client) buildCanonicalizedResourceTable(uri string) (string, error) {
errMsg := "buildCanonicalizedResourceTable error: %s"
u, err := url.Parse(uri)
if err != nil {
return "", fmt.Errorf(errMsg, err.Error())
}
cr := "/" + c.getCanonicalizedAccountName()
if len(u.Path) > 0 {
cr += u.Path
}
return cr, nil
}
func (c Client) buildCanonicalizedResource(uri string) (string, error) {
errMsg := "buildCanonicalizedResource error: %s"
u, err := url.Parse(uri)
if err != nil {
return "", fmt.Errorf(errMsg, err.Error())
}
cr := "/" + c.getCanonicalizedAccountName()
if len(u.Path) > 0 {
// Any portion of the CanonicalizedResource string that is derived from
// the resource's URI should be encoded exactly as it is in the URI.
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
cr += u.EscapedPath()
}
params, err := url.ParseQuery(u.RawQuery)
if err != nil {
return "", fmt.Errorf(errMsg, err.Error())
}
if len(params) > 0 {
cr += "\n"
keys := make([]string, 0, len(params))
for key := range params {
keys = append(keys, key)
}
sort.Strings(keys)
for i, key := range keys {
if len(params[key]) > 1 {
sort.Strings(params[key])
}
if i == len(keys)-1 {
cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))
} else {
cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ","))
}
}
}
return cr, nil
}
func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string {
contentLength := headers["Content-Length"]
if contentLength == "0" {
contentLength = ""
}
canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
verb,
headers["Content-Encoding"],
headers["Content-Language"],
contentLength,
headers["Content-MD5"],
headers["Content-Type"],
headers["Date"],
headers["If-Modified-Since"],
headers["If-Match"],
headers["If-None-Match"],
headers["If-Unmodified-Since"],
headers["Range"],
c.buildCanonicalizedHeader(headers),
canonicalizedResource)
return canonicalizedString
}
func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) {
authHeader, err := c.getAuthorizationHeader(verb, url, headers)
if err != nil {
return nil, err
}
headers["Authorization"] = authHeader
if err != nil {
return nil, err
}
req, err := http.NewRequest(verb, url, body)
if err != nil {
return nil, errors.New("azure/storage: error creating request: " + err.Error())
}
if clstr, ok := headers["Content-Length"]; ok {
// content length header is being signed, but completely ignored by golang.
// instead we have to use the ContentLength property on the request struct
// (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and
// https://golang.org/src/net/http/transfer.go?s=1739:2467#L49)
req.ContentLength, err = strconv.ParseInt(clstr, 10, 64)
if err != nil {
return nil, err
}
}
for k, v := range headers {
req.Header.Add(k, v)
}
httpClient := c.HTTPClient
if httpClient == nil {
httpClient = http.DefaultClient
}
resp, err := httpClient.Do(req)
if err != nil {
return nil, err
}
statusCode := resp.StatusCode
if statusCode >= 400 && statusCode <= 505 {
var respBody []byte
respBody, err = readResponseBody(resp)
if err != nil {
return nil, err
}
if len(respBody) == 0 {
// no error in response body
err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status)
} else {
// response contains storage service error object, unmarshal
storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id"))
if err != nil { // error unmarshaling the error response
err = errIn
}
err = storageErr
}
return &storageResponse{
statusCode: resp.StatusCode,
headers: resp.Header,
body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */
}, err
}
return &storageResponse{
statusCode: resp.StatusCode,
headers: resp.Header,
body: resp.Body}, nil
}
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) {
req, err := http.NewRequest(verb, url, body)
for k, v := range headers {
req.Header.Add(k, v)
}
httpClient := c.HTTPClient
if httpClient == nil {
httpClient = http.DefaultClient
}
resp, err := httpClient.Do(req)
if err != nil {
return nil, err
}
respToRet := &odataResponse{}
respToRet.body = resp.Body
respToRet.statusCode = resp.StatusCode
respToRet.headers = resp.Header
statusCode := resp.StatusCode
if statusCode >= 400 && statusCode <= 505 {
var respBody []byte
respBody, err = readResponseBody(resp)
if err != nil {
return nil, err
}
if len(respBody) == 0 {
// no error in response body
err = fmt.Errorf("storage: service returned without a response body (%d)", resp.StatusCode)
return respToRet, err
}
// try unmarshal as odata.error json
err = json.Unmarshal(respBody, &respToRet.odata)
return respToRet, err
}
return respToRet, nil
}
func (c Client) createSharedKeyLite(url string, headers map[string]string) (string, error) {
can, err := c.buildCanonicalizedResourceTable(url)
if err != nil {
return "", err
}
strToSign := headers["x-ms-date"] + "\n" + can
hmac := c.computeHmac256(strToSign)
return fmt.Sprintf("SharedKeyLite %s:%s", c.accountName, hmac), nil
}
func (c Client) execTable(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) {
var err error
headers["Authorization"], err = c.createSharedKeyLite(url, headers)
if err != nil {
return nil, err
}
return c.execInternalJSON(verb, url, headers, body)
}
func readResponseBody(resp *http.Response) ([]byte, error) {
defer resp.Body.Close()
out, err := ioutil.ReadAll(resp.Body)
if err == io.EOF {
err = nil
}
return out, err
}
func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) {
var storageErr AzureStorageServiceError
if err := xml.Unmarshal(body, &storageErr); err != nil {
return storageErr, err
}
storageErr.StatusCode = statusCode
storageErr.RequestID = requestID
return storageErr, nil
}
func (e AzureStorageServiceError) Error() string {
return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s",
e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue)
}
// checkRespCode returns UnexpectedStatusError if the given response code is not
// one of the allowed status codes; otherwise nil.
func checkRespCode(respCode int, allowed []int) error {
for _, v := range allowed {
if respCode == v {
return nil
}
}
return UnexpectedStatusCodeError{allowed, respCode}
}


@@ -0,0 +1,352 @@
package storage
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strings"
)
// FileServiceClient contains operations for Microsoft Azure File Service.
type FileServiceClient struct {
client Client
}
// A Share is an entry in ShareListResponse.
type Share struct {
Name string `xml:"Name"`
Properties ShareProperties `xml:"Properties"`
}
// ShareProperties contains various properties of a share returned from
// various endpoints like ListShares.
type ShareProperties struct {
LastModified string `xml:"Last-Modified"`
Etag string `xml:"Etag"`
Quota string `xml:"Quota"`
}
// ShareListResponse contains the response fields from
// ListShares call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
type ShareListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"`
Prefix string `xml:"Prefix"`
Marker string `xml:"Marker"`
NextMarker string `xml:"NextMarker"`
MaxResults int64 `xml:"MaxResults"`
Shares []Share `xml:"Shares>Share"`
}
// ListSharesParameters defines the set of customizable parameters to make a
// List Shares call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
type ListSharesParameters struct {
Prefix string
Marker string
Include string
MaxResults uint
Timeout uint
}
// ShareHeaders contains various properties of a share and is used in
// SetShareProperties
type ShareHeaders struct {
Quota string `header:"x-ms-share-quota"`
}
func (p ListSharesParameters) getParameters() url.Values {
out := url.Values{}
if p.Prefix != "" {
out.Set("prefix", p.Prefix)
}
if p.Marker != "" {
out.Set("marker", p.Marker)
}
if p.Include != "" {
out.Set("include", p.Include)
}
if p.MaxResults != 0 {
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
}
if p.Timeout != 0 {
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
}
return out
}
// pathForFileShare returns the URL path segment for a File Share resource
func pathForFileShare(name string) string {
return fmt.Sprintf("/%s", name)
}
// ListShares returns the list of shares in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (f FileServiceClient) ListShares(params ListSharesParameters) (ShareListResponse, error) {
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
uri := f.client.getEndpoint(fileServiceName, "", q)
headers := f.client.getStandardHeaders()
var out ShareListResponse
resp, err := f.client.exec("GET", uri, headers, nil)
if err != nil {
return out, err
}
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &out)
return out, err
}
// CreateShare operation creates a new share under the specified account. If a
// share with the same name already exists, the operation fails.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
func (f FileServiceClient) CreateShare(name string) error {
resp, err := f.createShare(name)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
// ShareExists returns true if a share with the given name exists
// on the storage account, otherwise returns false.
func (f FileServiceClient) ShareExists(name string) (bool, error) {
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
headers := f.client.getStandardHeaders()
resp, err := f.client.exec("HEAD", uri, headers, nil)
if resp != nil {
defer resp.body.Close()
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusOK, nil
}
}
return false, err
}
// GetShareURL gets the canonical URL to the share with the specified name.
// This method does not create a publicly accessible URL if the share is
// private, nor does it check whether the share exists.
func (f FileServiceClient) GetShareURL(name string) string {
return f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{})
}
// CreateShareIfNotExists creates a new share under the specified account if
// it does not exist. Returns true if the share is newly created or false if
// the share already exists.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) {
resp, err := f.createShare(name)
if resp != nil {
defer resp.body.Close()
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
return resp.statusCode == http.StatusCreated, nil
}
}
return false, err
}
// createShare creates an Azure File Share and returns its response
func (f FileServiceClient) createShare(name string) (*storageResponse, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
headers := f.client.getStandardHeaders()
return f.client.exec("PUT", uri, headers, nil)
}
// GetShareProperties provides various information about the specified
// share. See https://msdn.microsoft.com/en-us/library/azure/dn689099.aspx
func (f FileServiceClient) GetShareProperties(name string) (*ShareProperties, error) {
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
headers := f.client.getStandardHeaders()
resp, err := f.client.exec("HEAD", uri, headers, nil)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, err
}
return &ShareProperties{
LastModified: resp.headers.Get("Last-Modified"),
Etag: resp.headers.Get("Etag"),
Quota: resp.headers.Get("x-ms-share-quota"),
}, nil
}
// SetShareProperties replaces the ShareHeaders for the specified share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by SetShareProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx
func (f FileServiceClient) SetShareProperties(name string, shareHeaders ShareHeaders) error {
params := url.Values{}
params.Set("restype", "share")
params.Set("comp", "properties")
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params)
headers := f.client.getStandardHeaders()
extraHeaders := headersFromStruct(shareHeaders)
for k, v := range extraHeaders {
headers[k] = v
}
resp, err := f.client.exec("PUT", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusOK})
}
// DeleteShare operation marks the specified share for deletion. The share
// and any files contained within it are later deleted during garbage
// collection.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
func (f FileServiceClient) DeleteShare(name string) error {
resp, err := f.deleteShare(name)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}
// DeleteShareIfExists operation marks the specified share for deletion if it
// exists. The share and any files contained within it are later deleted during
// garbage collection. Returns true if share existed and deleted with this call,
// false otherwise.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) {
resp, err := f.deleteShare(name)
if resp != nil {
defer resp.body.Close()
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusAccepted, nil
}
}
return false, err
}
// deleteShare makes the call to Delete Share operation endpoint and returns
// the response
func (f FileServiceClient) deleteShare(name string) (*storageResponse, error) {
if err := f.checkForStorageEmulator(); err != nil {
return nil, err
}
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
return f.client.exec("DELETE", uri, f.client.getStandardHeaders(), nil)
}
// SetShareMetadata replaces the metadata for the specified Share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetShareMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (f FileServiceClient) SetShareMetadata(name string, metadata map[string]string, extraHeaders map[string]string) error {
params := url.Values{}
params.Set("restype", "share")
params.Set("comp", "metadata")
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params)
headers := f.client.getStandardHeaders()
for k, v := range metadata {
headers[userDefinedMetadataHeaderPrefix+k] = v
}
for k, v := range extraHeaders {
headers[k] = v
}
resp, err := f.client.exec("PUT", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusOK})
}
// GetShareMetadata returns all user-defined metadata for the specified share.
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (f FileServiceClient) GetShareMetadata(name string) (map[string]string, error) {
params := url.Values{}
params.Set("restype", "share")
params.Set("comp", "metadata")
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params)
headers := f.client.getStandardHeaders()
resp, err := f.client.exec("GET", uri, headers, nil)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, err
}
metadata := make(map[string]string)
for k, v := range resp.headers {
// Can't trust CanonicalHeaderKey() to munge case
// reliably. "_" is allowed in identifiers:
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
// http://tools.ietf.org/html/rfc7230#section-3.2
// ...but "_" is considered invalid by
// CanonicalMIMEHeaderKey in
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
// so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar".
k = strings.ToLower(k)
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
continue
}
// metadata["foo"] = content of the last X-Ms-Meta-Foo header
k = k[len(userDefinedMetadataHeaderPrefix):]
metadata[k] = v[len(v)-1]
}
return metadata, nil
}
// checkForStorageEmulator determines if the client is set up for use with
// the Azure Storage Emulator, and returns a relevant error.
func (f FileServiceClient) checkForStorageEmulator() error {
if f.client.accountName == StorageEmulatorAccountName {
return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
}
return nil
}
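// exampleShareRoundTrip is a sketch (not part of the original change) showing
// how the share helpers above compose: create-if-missing, set the quota via
// the x-ms-share-quota header (see ShareHeaders), then list matching shares.
// It assumes an already initialised FileServiceClient; the share name, quota
// value and prefix below are illustrative.
func exampleShareRoundTrip(f FileServiceClient) error {
    created, err := f.CreateShareIfNotExists("builds")
    if err != nil {
        return err
    }
    if created {
        if err := f.SetShareProperties("builds", ShareHeaders{Quota: "50"}); err != nil {
            return err
        }
    }
    shares, err := f.ListShares(ListSharesParameters{Prefix: "build"})
    if err != nil {
        return err
    }
    for _, s := range shares.Shares {
        fmt.Printf("%s (last modified %s)\n", s.Name, s.Properties.LastModified)
    }
    return nil
}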

View File

@ -0,0 +1,306 @@
package storage
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
)
const (
// casing follows Go's http.Header canonicalization of header names.
approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
userDefinedMetadataHeaderPrefix = "X-Ms-Meta-"
)
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
// Service.
type QueueServiceClient struct {
client Client
}
func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) }
func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) }
func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) }
type putMessageRequest struct {
XMLName xml.Name `xml:"QueueMessage"`
MessageText string `xml:"MessageText"`
}
// PutMessageParameters is the set of options that can be specified for the
// Put Message operation. A zero struct does not use any preferences for the
// request.
type PutMessageParameters struct {
VisibilityTimeout int
MessageTTL int
}
func (p PutMessageParameters) getParameters() url.Values {
out := url.Values{}
if p.VisibilityTimeout != 0 {
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
}
if p.MessageTTL != 0 {
out.Set("messagettl", strconv.Itoa(p.MessageTTL))
}
return out
}
// GetMessagesParameters is the set of options that can be specified for the
// Get Messages operation. A zero struct does not use any preferences for the
// request.
type GetMessagesParameters struct {
NumOfMessages int
VisibilityTimeout int
}
func (p GetMessagesParameters) getParameters() url.Values {
out := url.Values{}
if p.NumOfMessages != 0 {
out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
}
if p.VisibilityTimeout != 0 {
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
}
return out
}
// PeekMessagesParameters is the set of options that can be specified for the
// Peek Messages operation. A zero struct does not use any preferences for the
// request.
type PeekMessagesParameters struct {
NumOfMessages int
}
func (p PeekMessagesParameters) getParameters() url.Values {
out := url.Values{"peekonly": {"true"}} // Required for peek operation
if p.NumOfMessages != 0 {
out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
}
return out
}
// GetMessagesResponse represents a response returned from Get Messages
// operation.
type GetMessagesResponse struct {
XMLName xml.Name `xml:"QueueMessagesList"`
QueueMessagesList []GetMessageResponse `xml:"QueueMessage"`
}
// GetMessageResponse represents a QueueMessage object returned from Get
// Messages operation response.
type GetMessageResponse struct {
MessageID string `xml:"MessageId"`
InsertionTime string `xml:"InsertionTime"`
ExpirationTime string `xml:"ExpirationTime"`
PopReceipt string `xml:"PopReceipt"`
TimeNextVisible string `xml:"TimeNextVisible"`
DequeueCount int `xml:"DequeueCount"`
MessageText string `xml:"MessageText"`
}
// PeekMessagesResponse represents a response returned from Peek Messages
// operation.
type PeekMessagesResponse struct {
XMLName xml.Name `xml:"QueueMessagesList"`
QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"`
}
// PeekMessageResponse represents a QueueMessage object returned from Peek
// Messages operation response.
type PeekMessageResponse struct {
MessageID string `xml:"MessageId"`
InsertionTime string `xml:"InsertionTime"`
ExpirationTime string `xml:"ExpirationTime"`
DequeueCount int `xml:"DequeueCount"`
MessageText string `xml:"MessageText"`
}
// QueueMetadataResponse represents user defined metadata and queue
// properties on a specific queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
type QueueMetadataResponse struct {
ApproximateMessageCount int
UserDefinedMetadata map[string]string
}
// SetMetadata operation sets user-defined metadata on the specified queue.
// Metadata is associated with the queue as name-value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx
func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error {
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
headers := c.client.getStandardHeaders()
for k, v := range metadata {
headers[userDefinedMetadataHeaderPrefix+k] = v
}
resp, err := c.client.exec("PUT", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
// GetMetadata operation retrieves user-defined metadata and queue
// properties on the specified queue. Metadata is associated with
// the queue as name-value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
//
// Because of the way Go's http client (and http.Header in particular)
// canonicalizes header names, the returned metadata names will always
// be all lower case.
func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) {
qm := QueueMetadataResponse{}
qm.UserDefinedMetadata = make(map[string]string)
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
headers := c.client.getStandardHeaders()
resp, err := c.client.exec("GET", uri, headers, nil)
if err != nil {
return qm, err
}
defer resp.body.Close()
for k, v := range resp.headers {
if len(v) != 1 {
return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k)
}
value := v[0]
if k == approximateMessagesCountHeader {
qm.ApproximateMessageCount, err = strconv.Atoi(value)
if err != nil {
return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value)
}
} else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) {
name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix)
qm.UserDefinedMetadata[strings.ToLower(name)] = value
}
}
return qm, checkRespCode(resp.statusCode, []int{http.StatusOK})
}
// CreateQueue operation creates a queue under the given account.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx
func (c QueueServiceClient) CreateQueue(name string) error {
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
headers := c.client.getStandardHeaders()
resp, err := c.client.exec("PUT", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
// DeleteQueue operation permanently deletes the specified queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
func (c QueueServiceClient) DeleteQueue(name string) error {
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
// QueueExists returns true if a queue with the given name exists.
func (c QueueServiceClient) QueueExists(name string) (bool, error) {
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}})
resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) {
return resp.statusCode == http.StatusOK, nil
}
return false, err
}
// PutMessage operation adds a new message to the back of the message queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx
func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error {
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
req := putMessageRequest{MessageText: message}
body, nn, err := xmlMarshal(req)
if err != nil {
return err
}
headers := c.client.getStandardHeaders()
headers["Content-Length"] = strconv.Itoa(nn)
resp, err := c.client.exec("POST", uri, headers, body)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
// ClearMessages operation deletes all messages from the specified queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx
func (c QueueServiceClient) ClearMessages(queue string) error {
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{})
resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
// GetMessages operation retrieves one or more messages from the front of the
// queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx
func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
var r GetMessagesResponse
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
if err != nil {
return r, err
}
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &r)
return r, err
}
// PeekMessages retrieves one or more messages from the front of the queue, but
// does not alter the visibility of the message.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx
func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
var r PeekMessagesResponse
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
if err != nil {
return r, err
}
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &r)
return r, err
}
// DeleteMessage operation deletes the specified message.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error {
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{
"popreceipt": {popReceipt}})
resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
if err != nil {
return err
}
defer resp.body.Close()
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
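// exampleQueueRoundTrip is a sketch (not part of the original change) showing
// the put/get/delete message cycle with the helpers above; it assumes an
// already initialised QueueServiceClient. Queue name, TTL and visibility
// timeout values are illustrative.
func exampleQueueRoundTrip(q QueueServiceClient) error {
    const queue = "ci-jobs"
    if err := q.CreateQueue(queue); err != nil {
        return err
    }
    if err := q.PutMessage(queue, "build geth 1.5.0", PutMessageParameters{MessageTTL: 3600}); err != nil {
        return err
    }
    resp, err := q.GetMessages(queue, GetMessagesParameters{NumOfMessages: 1, VisibilityTimeout: 30})
    if err != nil {
        return err
    }
    for _, m := range resp.QueueMessagesList {
        fmt.Printf("dequeued %q (dequeue count %d)\n", m.MessageText, m.DequeueCount)
        // A message must be deleted with its pop receipt, otherwise it becomes
        // visible again once the visibility timeout expires.
        if err := q.DeleteMessage(queue, m.MessageID, m.PopReceipt); err != nil {
            return err
        }
    }
    return nil
}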

View File

@ -0,0 +1,129 @@
package storage
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/url"
)
// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
client Client
}
// AzureTable is the typedef for an Azure Table name
type AzureTable string
const (
tablesURIPath = "/Tables"
)
type createTableRequest struct {
TableName string `json:"TableName"`
}
func pathForTable(table AzureTable) string { return string(table) }
func (c *TableServiceClient) getStandardHeaders() map[string]string {
return map[string]string{
"x-ms-version": "2015-02-21",
"x-ms-date": currentTimeRfc1123Formatted(),
"Accept": "application/json;odata=nometadata",
"Accept-Charset": "UTF-8",
"Content-Type": "application/json",
}
}
// QueryTables returns the tables present in the storage account
// associated with the TableServiceClient.
func (c *TableServiceClient) QueryTables() ([]AzureTable, error) {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execTable("GET", uri, headers, nil)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, err
}
buf := new(bytes.Buffer)
if _, err := buf.ReadFrom(resp.body); err != nil {
return nil, err
}
var respArray queryTablesResponse
if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil {
return nil, err
}
s := make([]AzureTable, len(respArray.TableName))
for i, elem := range respArray.TableName {
s[i] = AzureTable(elem.TableName)
}
return s, nil
}
// CreateTable creates the table given the specific
// name. This function fails if the name is not compliant
// with the specification or the table already exists.
func (c *TableServiceClient) CreateTable(table AzureTable) error {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
headers := c.getStandardHeaders()
req := createTableRequest{TableName: string(table)}
buf := new(bytes.Buffer)
if err := json.NewEncoder(buf).Encode(req); err != nil {
return err
}
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
resp, err := c.client.execTable("POST", uri, headers, buf)
if err != nil {
return err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
return err
}
return nil
}
// DeleteTable deletes the table given the specific
// name. This function fails if the table is not present.
// Be advised: DeleteTable deletes all the entries
// that may be present.
func (c *TableServiceClient) DeleteTable(table AzureTable) error {
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
uri += fmt.Sprintf("('%s')", string(table))
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execTable("DELETE", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
return err
}
return nil
}
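// exampleTableLifecycle is a sketch (not part of the original change)
// combining the helpers above: create a table, list the existing ones, then
// drop it again. It assumes an already initialised *TableServiceClient; the
// table name is illustrative.
func exampleTableLifecycle(t *TableServiceClient) error {
    table := AzureTable("buildmeta")
    if err := t.CreateTable(table); err != nil {
        return err
    }
    tables, err := t.QueryTables()
    if err != nil {
        return err
    }
    for _, name := range tables {
        fmt.Println("table:", name)
    }
    return t.DeleteTable(table)
}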

View File

@ -0,0 +1,355 @@
package storage
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"reflect"
)
const (
partitionKeyNode = "PartitionKey"
rowKeyNode = "RowKey"
tag = "table"
tagIgnore = "-"
continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey"
continuationTokenRowHeader = "X-Ms-Continuation-Nextrowkey"
maxTopParameter = 1000
)
type queryTablesResponse struct {
TableName []struct {
TableName string `json:"TableName"`
} `json:"value"`
}
const (
tableOperationTypeInsert = iota
tableOperationTypeUpdate = iota
tableOperationTypeMerge = iota
tableOperationTypeInsertOrReplace = iota
tableOperationTypeInsertOrMerge = iota
)
type tableOperation int
// TableEntity interface specifies
// the functions needed to support
// marshaling and unmarshaling into
// Azure Tables. The struct must only contain
// simple types because Azure Tables do not
// support hierarchy.
type TableEntity interface {
PartitionKey() string
RowKey() string
SetPartitionKey(string) error
SetRowKey(string) error
}
// ContinuationToken is an opaque (i.e. not useful to inspect)
// struct that Get... methods can return if there are more
// entries to be returned than the ones already
// returned. Just pass it to the same function to continue
// receiving the remaining entries.
type ContinuationToken struct {
NextPartitionKey string
NextRowKey string
}
type getTableEntriesResponse struct {
Elements []map[string]interface{} `json:"value"`
}
// QueryTableEntities queries the specified table and returns the unmarshaled
// entities of type retType.
// The top parameter limits the number of returned entries; the Azure API
// allows at most 1000. If more than top entries are available, the function
// returns a non-nil *ContinuationToken; call the same function again, passing
// the received ContinuationToken as previousContToken, to fetch the following
// entries. The query parameter is the OData filter; pass the empty string to
// retrieve all entries.
// The function returns the TableEntity slice, the *ContinuationToken if there
// are more entries to be returned, and an error in case something went wrong.
//
// Example:
// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "")
func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) {
if top > maxTopParameter {
return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top)
}
uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{})
uri += fmt.Sprintf("?$top=%d", top)
if query != "" {
uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query))
}
if previousContToken != nil {
uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey)
}
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
resp, err := c.client.execTable("GET", uri, headers, nil)
if err != nil {
return nil, nil, err
}
contToken := extractContinuationTokenFromHeaders(resp.headers)
if err != nil {
return nil, contToken, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, contToken, err
}
retEntries, err := deserializeEntity(retType, resp.body)
if err != nil {
return nil, contToken, err
}
return retEntries, contToken, nil
}
// InsertEntity inserts an entity in the specified table.
// The function fails if there is an entity with the same
// PartitionKey and RowKey in the table.
func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error {
sc, err := c.execTable(table, entity, false, "POST")
if err != nil {
return err
}
return checkRespCode(sc, []int{http.StatusCreated})
}
func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) {
uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
if specifyKeysInURL {
uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
}
headers := c.getStandardHeaders()
var buf bytes.Buffer
if err := injectPartitionAndRowKeys(entity, &buf); err != nil {
return 0, err
}
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
var err error
var resp *odataResponse
resp, err = c.client.execTable(method, uri, headers, &buf)
if err != nil {
return 0, err
}
defer resp.body.Close()
return resp.statusCode, nil
}
// UpdateEntity updates the contents of an entity with the
// one passed as parameter. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error {
sc, err := c.execTable(table, entity, true, "PUT")
if err != nil {
return err
}
return checkRespCode(sc, []int{http.StatusNoContent})
}
// MergeEntity merges the contents of an entity with the
// one passed as parameter.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error {
sc, err := c.execTable(table, entity, true, "MERGE")
if err != nil {
return err
}
return checkRespCode(sc, []int{http.StatusNoContent})
}
// DeleteEntityWithoutCheck deletes the entity matching by
// PartitionKey and RowKey. There is no check on IfMatch
// parameter so the entity is always deleted.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error {
return c.DeleteEntity(table, entity, "*")
}
// DeleteEntity deletes the entity matching by
// PartitionKey, RowKey and ifMatch field.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table or
// the ifMatch is different.
func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error {
uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
headers := c.getStandardHeaders()
headers["Content-Length"] = "0"
headers["If-Match"] = ifMatch
resp, err := c.client.execTable("DELETE", uri, headers, nil)
if err != nil {
return err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
return err
}
return nil
}
// InsertOrReplaceEntity inserts an entity in the specified table
// or replaces the existing one.
func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error {
sc, err := c.execTable(table, entity, true, "PUT")
if err != nil {
return err
}
return checkRespCode(sc, []int{http.StatusNoContent})
}
// InsertOrMergeEntity inserts an entity in the specified table
// or merges the existing one.
func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error {
sc, err := c.execTable(table, entity, true, "MERGE")
if err != nil {
return err
}
return checkRespCode(sc, []int{http.StatusNoContent})
}
func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error {
if err := json.NewEncoder(buf).Encode(entity); err != nil {
return err
}
dec := make(map[string]interface{})
if err := json.NewDecoder(buf).Decode(&dec); err != nil {
return err
}
// Inject PartitionKey and RowKey
dec[partitionKeyNode] = entity.PartitionKey()
dec[rowKeyNode] = entity.RowKey()
// Remove tagged fields
// The tag is defined in the const section
// This is useful to avoid storing the PartitionKey and RowKey twice.
numFields := reflect.ValueOf(entity).Elem().NumField()
for i := 0; i < numFields; i++ {
f := reflect.ValueOf(entity).Elem().Type().Field(i)
if f.Tag.Get(tag) == tagIgnore {
// we must look for its JSON name in the dictionary
// as the user can rename it using a tag
jsonName := f.Name
if f.Tag.Get("json") != "" {
jsonName = f.Tag.Get("json")
}
delete(dec, jsonName)
}
}
buf.Reset()
if err := json.NewEncoder(buf).Encode(&dec); err != nil {
return err
}
return nil
}
func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) {
buf := new(bytes.Buffer)
var ret getTableEntriesResponse
if err := json.NewDecoder(reader).Decode(&ret); err != nil {
return nil, err
}
tEntries := make([]TableEntity, len(ret.Elements))
for i, entry := range ret.Elements {
buf.Reset()
if err := json.NewEncoder(buf).Encode(entry); err != nil {
return nil, err
}
dec := make(map[string]interface{})
if err := json.NewDecoder(buf).Decode(&dec); err != nil {
return nil, err
}
var pKey, rKey string
// strip pk and rk
for key, val := range dec {
switch key {
case partitionKeyNode:
pKey = val.(string)
case rowKeyNode:
rKey = val.(string)
}
}
delete(dec, partitionKeyNode)
delete(dec, rowKeyNode)
buf.Reset()
if err := json.NewEncoder(buf).Encode(dec); err != nil {
return nil, err
}
// Create an empty retType instance
tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity)
// Populate it with the values
if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil {
return nil, err
}
// Reset PartitionKey and RowKey
tEntries[i].SetPartitionKey(pKey)
tEntries[i].SetRowKey(rKey)
}
return tEntries, nil
}
func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken {
ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)}
if ct.NextPartitionKey != "" && ct.NextRowKey != "" {
return &ct
}
return nil
}
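// buildEntity is a sketch (not part of the original change) of a concrete
// TableEntity: the key fields carry json:"-" and table:"-" tags so
// injectPartitionAndRowKeys keeps them out of the JSON body, and the setters
// satisfy the interface. Field names and values are illustrative.
type buildEntity struct {
    PKey   string `json:"-" table:"-"`
    RKey   string `json:"-" table:"-"`
    Commit string `json:"commit"`
}

func (b *buildEntity) PartitionKey() string           { return b.PKey }
func (b *buildEntity) RowKey() string                 { return b.RKey }
func (b *buildEntity) SetPartitionKey(v string) error { b.PKey = v; return nil }
func (b *buildEntity) SetRowKey(v string) error       { b.RKey = v; return nil }

// exampleEntityRoundTrip inserts one entity and reads it back with an OData
// filter; it assumes an already initialised *TableServiceClient and an
// existing table.
func exampleEntityRoundTrip(t *TableServiceClient, table AzureTable) error {
    if err := t.InsertOrReplaceEntity(table, &buildEntity{PKey: "linux", RKey: "386", Commit: "d0c820a"}); err != nil {
        return err
    }
    entities, _, err := t.QueryTableEntities(table, nil, reflect.TypeOf(&buildEntity{}), 10, "PartitionKey eq 'linux'")
    if err != nil {
        return err
    }
    for _, e := range entities {
        fmt.Println(e.PartitionKey(), e.RowKey())
    }
    return nil
}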

View File

@ -0,0 +1,85 @@
package storage
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"time"
)
func (c Client) computeHmac256(message string) string {
h := hmac.New(sha256.New, c.accountKey)
h.Write([]byte(message))
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
func currentTimeRfc1123Formatted() string {
return timeRfc1123Formatted(time.Now().UTC())
}
func timeRfc1123Formatted(t time.Time) string {
return t.Format(http.TimeFormat)
}
func mergeParams(v1, v2 url.Values) url.Values {
out := url.Values{}
for k, v := range v1 {
out[k] = v
}
for k, v := range v2 {
vals, ok := out[k]
if ok {
vals = append(vals, v...)
out[k] = vals
} else {
out[k] = v
}
}
return out
}
func prepareBlockListRequest(blocks []Block) string {
s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
for _, v := range blocks {
s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
}
s += `</BlockList>`
return s
}
func xmlUnmarshal(body io.Reader, v interface{}) error {
data, err := ioutil.ReadAll(body)
if err != nil {
return err
}
return xml.Unmarshal(data, v)
}
func xmlMarshal(v interface{}) (io.Reader, int, error) {
b, err := xml.Marshal(v)
if err != nil {
return nil, 0, err
}
return bytes.NewReader(b), len(b), nil
}
func headersFromStruct(v interface{}) map[string]string {
headers := make(map[string]string)
value := reflect.ValueOf(v)
for i := 0; i < value.NumField(); i++ {
key := value.Type().Field(i).Tag.Get("header")
val := value.Field(i).String()
if val != "" {
headers[key] = val
}
}
return headers
}
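// exampleHelpers is a sketch (not part of the original change) of the two
// helpers most callers touch: headersFromStruct turns `header:"..."`-tagged
// struct fields into request headers (as SetShareProperties does with
// ShareHeaders), and mergeParams combines query-value sets.
func exampleHelpers() {
    h := headersFromStruct(struct {
        Quota string `header:"x-ms-share-quota"`
    }{Quota: "50"})
    fmt.Println(h["x-ms-share-quota"]) // prints "50"

    q := mergeParams(url.Values{"comp": {"list"}}, url.Values{"prefix": {"geth"}})
    fmt.Println(q.Encode()) // prints "comp=list&prefix=geth"
}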

View File

@ -1,6 +1,6 @@
ISC License
Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above

View File

@ -1,11 +1,13 @@
go-spew
=======
[![Build Status](https://travis-ci.org/davecgh/go-spew.png?branch=master)]
(https://travis-ci.org/davecgh/go-spew) [![Coverage Status]
(https://coveralls.io/repos/davecgh/go-spew/badge.png?branch=master)]
[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)]
(https://travis-ci.org/davecgh/go-spew) [![ISC License]
(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status]
(https://img.shields.io/coveralls/davecgh/go-spew.svg)]
(https://coveralls.io/r/davecgh/go-spew?branch=master)
Go-spew implements a deep pretty printer for Go data structures to aid in
debugging. A comprehensive suite of tests with 100% test coverage is provided
to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
@ -19,7 +21,7 @@ post about it
## Documentation
[![GoDoc](https://godoc.org/github.com/davecgh/go-spew/spew?status.png)]
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)]
(http://godoc.org/github.com/davecgh/go-spew/spew)
Full `go doc` style documentation for the project can be viewed online without
@ -160,6 +162,15 @@ options. See the ConfigState documentation for more details.
App Engine or with the "safe" build tag specified.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of capacities
for arrays, slices, maps and channels. This is useful when diffing data
structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
@ -191,4 +202,4 @@ using the unsafe package.
## License
Go-spew is licensed under the liberal ISC License.
Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License.

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015 Dave Collins <dave@davec.name>
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015 Dave Collins <dave@davec.name>
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -67,6 +67,15 @@ type ConfigState struct {
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -91,6 +91,15 @@ The following configuration options are available:
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
d.w.Write(closeParenBytes)
// Display pointer information.
if len(pointerChain) > 0 {
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) {
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || valueCap != 0 {
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if valueCap != 0 {
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -18,7 +18,7 @@ const (
)
var (
maxAgeRx = regexp.MustCompile("max-age=([0-9]+)")
maxAgeRx = regexp.MustCompile("max-age= *([0-9]+)")
)
const (

View File

@ -72,6 +72,7 @@ type Writer struct {
handle syscall.Handle
lastbuf bytes.Buffer
oldattr word
oldpos coord
}
func NewColorable(file *os.File) io.Writer {
@ -83,7 +84,7 @@ func NewColorable(file *os.File) io.Writer {
var csbi consoleScreenBufferInfo
handle := syscall.Handle(file.Fd())
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
return &Writer{out: file, handle: handle, oldattr: csbi.attributes}
return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
} else {
return file
}
@ -644,6 +645,11 @@ loop:
ci.visible = 0
procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
}
case 's':
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
w.oldpos = csbi.cursorPosition
case 'u':
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos)))
}
}
return len(data) - w.lastbuf.Len(), nil

vendor/golang.org/x/crypto/cast5/cast5.go generated vendored Normal file
View File

@ -0,0 +1,526 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
// OpenPGP cipher.
package cast5 // import "golang.org/x/crypto/cast5"
import "errors"
const BlockSize = 8
const KeySize = 16
type Cipher struct {
masking [16]uint32
rotate [16]uint8
}
func NewCipher(key []byte) (c *Cipher, err error) {
if len(key) != KeySize {
return nil, errors.New("CAST5: keys must be 16 bytes")
}
c = new(Cipher)
c.keySchedule(key)
return
}
func (c *Cipher) BlockSize() int {
return BlockSize
}
func (c *Cipher) Encrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
dst[0] = uint8(r >> 24)
dst[1] = uint8(r >> 16)
dst[2] = uint8(r >> 8)
dst[3] = uint8(r)
dst[4] = uint8(l >> 24)
dst[5] = uint8(l >> 16)
dst[6] = uint8(l >> 8)
dst[7] = uint8(l)
}
func (c *Cipher) Decrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
dst[0] = uint8(r >> 24)
dst[1] = uint8(r >> 16)
dst[2] = uint8(r >> 8)
dst[3] = uint8(r)
dst[4] = uint8(l >> 24)
dst[5] = uint8(l >> 16)
dst[6] = uint8(l >> 8)
dst[7] = uint8(l)
}
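// exampleEncryptBlock is a sketch (not part of the original file) of the
// cipher's intended use: CAST5 works on BlockSize (8-byte) blocks with a
// KeySize (16-byte) key; the key and plaintext below are placeholders.
func exampleEncryptBlock() ([]byte, error) {
    key := make([]byte, KeySize) // all-zero key, for illustration only
    c, err := NewCipher(key)
    if err != nil {
        return nil, err
    }
    src := []byte("8 bytes!") // exactly one block
    dst := make([]byte, BlockSize)
    c.Encrypt(dst, src)
    return dst, nil
}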
type keyScheduleA [4][7]uint8
type keyScheduleB [4][5]uint8
// keyScheduleRound contains the magic values for a round of the key schedule.
// The keyScheduleA deals with the lines like:
// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]
// Conceptually, both x and z are in the same array, x first. The first
// element describes which word of this array gets written to and the
// second, which word gets read. So, for the line above, it's "4, 0", because
// it's writing to the first word of z, which, being after x, is word 4, and
// reading from the first word of x: word 0.
//
// Next are the indexes into the S-boxes. Now the array is treated as bytes. So
// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear
// that it's z that we're indexing.
//
// keyScheduleB deals with lines like:
// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]
// "K1" is ignored because key words are always written in order. So the five
// elements are the S-box indexes. They use the same form as in keyScheduleA,
// above.
type keyScheduleRound struct{}
type keySchedule []keyScheduleRound
var schedule = []struct {
a keyScheduleA
b keyScheduleB
}{
{
keyScheduleA{
{4, 0, 0xd, 0xf, 0xc, 0xe, 0x8},
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
},
keyScheduleB{
{16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2},
{16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6},
{16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9},
{16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc},
},
},
{
keyScheduleA{
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
{1, 4, 0, 2, 1, 3, 16 + 2},
{2, 5, 7, 6, 5, 4, 16 + 1},
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
},
keyScheduleB{
{3, 2, 0xc, 0xd, 8},
{1, 0, 0xe, 0xf, 0xd},
{7, 6, 8, 9, 3},
{5, 4, 0xa, 0xb, 7},
},
},
{
keyScheduleA{
{4, 0, 0xd, 0xf, 0xc, 0xe, 8},
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
},
keyScheduleB{
{16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9},
{16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc},
{16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2},
{16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6},
},
},
{
keyScheduleA{
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
{1, 4, 0, 2, 1, 3, 16 + 2},
{2, 5, 7, 6, 5, 4, 16 + 1},
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
},
keyScheduleB{
{8, 9, 7, 6, 3},
{0xa, 0xb, 5, 4, 7},
{0xc, 0xd, 3, 2, 8},
{0xe, 0xf, 1, 0, 0xd},
},
},
}
func (c *Cipher) keySchedule(in []byte) {
var t [8]uint32
var k [32]uint32
for i := 0; i < 4; i++ {
j := i * 4
t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3])
}
x := []byte{6, 7, 4, 5}
ki := 0
for half := 0; half < 2; half++ {
for _, round := range schedule {
for j := 0; j < 4; j++ {
var a [7]uint8
copy(a[:], round.a[j][:])
w := t[a[1]]
w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff]
w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff]
w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff]
w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff]
w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff]
t[a[0]] = w
}
for j := 0; j < 4; j++ {
var b [5]uint8
copy(b[:], round.b[j][:])
w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff]
w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff]
w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff]
w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff]
w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff]
k[ki] = w
ki++
}
}
}
for i := 0; i < 16; i++ {
c.masking[i] = k[i]
c.rotate[i] = uint8(k[16+i] & 0x1f)
}
}
// These are the three 'f' functions. See RFC 2144, section 2.2.
func f1(d, m uint32, r uint8) uint32 {
t := m + d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
}
func f2(d, m uint32, r uint8) uint32 {
t := m ^ d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
}
func f3(d, m uint32, r uint8) uint32 {
t := m - d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
}
var sBox = [8][256]uint32{
{
0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949,
0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0,
0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d,
0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe,
0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167,
0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291,
0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779,
0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d,
0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5,
0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324,
0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d,
0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96,
0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6,
0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872,
0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c,
0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e,
0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf,
},
{
0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651,
0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b,
0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359,
0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b,
0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb,
0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860,
0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c,
0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13,
0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f,
0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6,
0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58,
0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6,
0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6,
0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f,
0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9,
0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1,
},
{
0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90,
0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5,
0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e,
0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240,
0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71,
0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15,
0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2,
0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176,
0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148,
0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e,
0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f,
0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a,
0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b,
0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536,
0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69,
0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49,
0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d,
0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a,
0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783,
},
{
0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1,
0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf,
0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121,
0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb,
0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5,
0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d,
0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6,
0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23,
0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003,
0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119,
0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a,
0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df,
0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919,
0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab,
0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04,
0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282,
0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2,
},
{
0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f,
0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a,
0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff,
0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02,
0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a,
0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7,
0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9,
0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981,
0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774,
0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655,
0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2,
0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910,
0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1,
0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da,
0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049,
0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f,
0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba,
0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be,
0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3,
0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840,
0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4,
0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2,
0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7,
0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5,
0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e,
0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e,
0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801,
0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad,
0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0,
0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20,
0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8,
0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4,
},
{
0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac,
0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138,
0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367,
0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98,
0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072,
0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3,
0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd,
0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8,
0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9,
0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54,
0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387,
0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc,
0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf,
0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf,
0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f,
0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289,
0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950,
0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f,
0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b,
0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be,
0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13,
0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976,
0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0,
0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891,
0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da,
0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc,
0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084,
0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25,
0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121,
0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5,
0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd,
0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f,
},
{
0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f,
0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de,
0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43,
0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19,
0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2,
0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516,
0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88,
0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816,
0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756,
0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a,
0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264,
0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688,
0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28,
0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3,
0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7,
0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06,
0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033,
0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a,
0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566,
0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509,
0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962,
0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e,
0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c,
0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c,
0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285,
0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301,
0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be,
0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767,
0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647,
0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914,
0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c,
0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3,
},
{
0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5,
0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc,
0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd,
0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d,
0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2,
0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862,
0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc,
0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c,
0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e,
0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039,
0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8,
0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42,
0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5,
0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472,
0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225,
0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c,
0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb,
0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054,
0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70,
0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc,
0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c,
0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3,
0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4,
0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101,
0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f,
0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e,
0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a,
0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c,
0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384,
0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c,
0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82,
0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e,
},
}

219
vendor/golang.org/x/crypto/openpgp/armor/armor.go generated vendored Normal file

@ -0,0 +1,219 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
// very similar to PEM except that it has an additional CRC checksum.
package armor // import "golang.org/x/crypto/openpgp/armor"
import (
"bufio"
"bytes"
"encoding/base64"
"golang.org/x/crypto/openpgp/errors"
"io"
)
// A Block represents an OpenPGP armored structure.
//
// The encoded form is:
// -----BEGIN Type-----
// Headers
//
// base64-encoded Bytes
// '=' base64 encoded checksum
// -----END Type-----
// where Headers is a possibly empty sequence of Key: Value lines.
//
// Since the armored data can be very large, this package presents a streaming
// interface.
type Block struct {
Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE").
Header map[string]string // Optional headers.
Body io.Reader // A Reader from which the contents can be read
lReader lineReader
oReader openpgpReader
}
var ArmorCorrupt error = errors.StructuralError("armor invalid")
const crc24Init = 0xb704ce
const crc24Poly = 0x1864cfb
const crc24Mask = 0xffffff
// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1
func crc24(crc uint32, d []byte) uint32 {
for _, b := range d {
crc ^= uint32(b) << 16
for i := 0; i < 8; i++ {
crc <<= 1
if crc&0x1000000 != 0 {
crc ^= crc24Poly
}
}
}
return crc
}
var armorStart = []byte("-----BEGIN ")
var armorEnd = []byte("-----END ")
var armorEndOfLine = []byte("-----")
// lineReader wraps a line based reader. It watches for the end of an armor
// block and records the expected CRC value.
type lineReader struct {
in *bufio.Reader
buf []byte
eof bool
crc uint32
}
func (l *lineReader) Read(p []byte) (n int, err error) {
if l.eof {
return 0, io.EOF
}
if len(l.buf) > 0 {
n = copy(p, l.buf)
l.buf = l.buf[n:]
return
}
line, isPrefix, err := l.in.ReadLine()
if err != nil {
return
}
if isPrefix {
return 0, ArmorCorrupt
}
if len(line) == 5 && line[0] == '=' {
// This is the checksum line
var expectedBytes [3]byte
var m int
m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:])
if m != 3 || err != nil {
return
}
l.crc = uint32(expectedBytes[0])<<16 |
uint32(expectedBytes[1])<<8 |
uint32(expectedBytes[2])
line, _, err = l.in.ReadLine()
if err != nil && err != io.EOF {
return
}
if !bytes.HasPrefix(line, armorEnd) {
return 0, ArmorCorrupt
}
l.eof = true
return 0, io.EOF
}
if len(line) > 96 {
return 0, ArmorCorrupt
}
n = copy(p, line)
bytesToSave := len(line) - n
if bytesToSave > 0 {
if cap(l.buf) < bytesToSave {
l.buf = make([]byte, 0, bytesToSave)
}
l.buf = l.buf[0:bytesToSave]
copy(l.buf, line[n:])
}
return
}
// openpgpReader passes Read calls to the underlying base64 decoder, but keeps
// a running CRC of the resulting data and checks the CRC against the value
// found by the lineReader at EOF.
type openpgpReader struct {
lReader *lineReader
b64Reader io.Reader
currentCRC uint32
}
func (r *openpgpReader) Read(p []byte) (n int, err error) {
n, err = r.b64Reader.Read(p)
r.currentCRC = crc24(r.currentCRC, p[:n])
if err == io.EOF {
if r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
return 0, ArmorCorrupt
}
}
return
}
// Decode reads a PGP armored block from the given Reader. It will ignore
// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
// given Reader is not usable after calling this function: an arbitrary amount
// of data may have been read past the end of the block.
func Decode(in io.Reader) (p *Block, err error) {
r := bufio.NewReaderSize(in, 100)
var line []byte
ignoreNext := false
TryNextBlock:
p = nil
// Skip leading garbage
for {
ignoreThis := ignoreNext
line, ignoreNext, err = r.ReadLine()
if err != nil {
return
}
if ignoreNext || ignoreThis {
continue
}
line = bytes.TrimSpace(line)
if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
break
}
}
p = new(Block)
p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
p.Header = make(map[string]string)
nextIsContinuation := false
var lastKey string
// Read headers
for {
isContinuation := nextIsContinuation
line, nextIsContinuation, err = r.ReadLine()
if err != nil {
p = nil
return
}
if isContinuation {
p.Header[lastKey] += string(line)
continue
}
line = bytes.TrimSpace(line)
if len(line) == 0 {
break
}
i := bytes.Index(line, []byte(": "))
if i == -1 {
goto TryNextBlock
}
lastKey = string(line[:i])
p.Header[lastKey] = string(line[i+2:])
}
p.lReader.in = r
p.oReader.currentCRC = crc24Init
p.oReader.lReader = &p.lReader
p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
p.Body = &p.oReader
return
}
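A minimal usage sketch of the decoder above (not part of the vendored file): it reads one armored block from standard input and dumps its type, headers and payload size. The choice of os.Stdin and the ioutil.ReadAll drain are assumptions of the example; the CRC check runs transparently while Body is consumed.

// Example (not part of the vendored file): dump the type, headers and payload
// size of an armored block read from standard input.
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	block, err := armor.Decode(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, "no armored block found:", err)
		os.Exit(1)
	}
	fmt.Println("type:", block.Type)
	for k, v := range block.Header {
		fmt.Printf("header %s: %s\n", k, v)
	}
	// The CRC24 check happens while Body is read; a mismatch surfaces here.
	body, err := ioutil.ReadAll(block.Body)
	if err != nil {
		fmt.Fprintln(os.Stderr, "corrupt armor:", err)
		os.Exit(1)
	}
	fmt.Printf("%d bytes of payload\n", len(body))
}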

160
vendor/golang.org/x/crypto/openpgp/armor/encode.go generated vendored Normal file

@ -0,0 +1,160 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armor
import (
"encoding/base64"
"io"
)
var armorHeaderSep = []byte(": ")
var blockEnd = []byte("\n=")
var newline = []byte("\n")
var armorEndOfLineOut = []byte("-----\n")
// writeSlices writes its arguments to the given Writer.
func writeSlices(out io.Writer, slices ...[]byte) (err error) {
for _, s := range slices {
_, err = out.Write(s)
if err != nil {
return err
}
}
return
}
// lineBreaker breaks data across several lines, all of the same byte length
// (except possibly the last). Lines are broken with a single '\n'.
type lineBreaker struct {
lineLength int
line []byte
used int
out io.Writer
haveWritten bool
}
func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
return &lineBreaker{
lineLength: lineLength,
line: make([]byte, lineLength),
used: 0,
out: out,
}
}
func (l *lineBreaker) Write(b []byte) (n int, err error) {
n = len(b)
if n == 0 {
return
}
if l.used == 0 && l.haveWritten {
_, err = l.out.Write([]byte{'\n'})
if err != nil {
return
}
}
if l.used+len(b) < l.lineLength {
l.used += copy(l.line[l.used:], b)
return
}
l.haveWritten = true
_, err = l.out.Write(l.line[0:l.used])
if err != nil {
return
}
excess := l.lineLength - l.used
l.used = 0
_, err = l.out.Write(b[0:excess])
if err != nil {
return
}
_, err = l.Write(b[excess:])
return
}
func (l *lineBreaker) Close() (err error) {
if l.used > 0 {
_, err = l.out.Write(l.line[0:l.used])
if err != nil {
return
}
}
return
}
// encoding keeps track of a running CRC24 over the data which has been written
// to it and outputs an OpenPGP checksum when closed, followed by an armor
// trailer.
//
// It's built into a stack of io.Writers:
// encoding -> base64 encoder -> lineBreaker -> out
type encoding struct {
out io.Writer
breaker *lineBreaker
b64 io.WriteCloser
crc uint32
blockType []byte
}
func (e *encoding) Write(data []byte) (n int, err error) {
e.crc = crc24(e.crc, data)
return e.b64.Write(data)
}
func (e *encoding) Close() (err error) {
err = e.b64.Close()
if err != nil {
return
}
e.breaker.Close()
var checksumBytes [3]byte
checksumBytes[0] = byte(e.crc >> 16)
checksumBytes[1] = byte(e.crc >> 8)
checksumBytes[2] = byte(e.crc)
var b64ChecksumBytes [4]byte
base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine)
}
// Encode returns a WriteCloser which will encode the data written to it in
// OpenPGP armor.
func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
bType := []byte(blockType)
err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
if err != nil {
return
}
for k, v := range headers {
err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
if err != nil {
return
}
}
_, err = out.Write(newline)
if err != nil {
return
}
e := &encoding{
out: out,
breaker: newLineBreaker(out, 64),
crc: crc24Init,
blockType: bType,
}
e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
return e, nil
}
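A round-trip sketch (not part of the vendored file): armoring a payload with Encode and reading it back with Decode. The block type "PGP MESSAGE" and the Comment header are arbitrary values chosen for the example.

// Example (not part of the vendored file): encode a payload, then decode it
// again to show the two halves of the package fitting together.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Comment": "demo"})
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello, armor"))
	w.Close() // emits the "=" checksum line and the END trailer

	block, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(block.Body)
	fmt.Printf("%s: %s\n", block.Type, body) // PGP MESSAGE: hello, armor
}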

59
vendor/golang.org/x/crypto/openpgp/canonical_text.go generated vendored Normal file

@ -0,0 +1,59 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package openpgp
import "hash"
// NewCanonicalTextHash reformats text written to it into the canonical
// form and then applies the hash h. See RFC 4880, section 5.2.1.
func NewCanonicalTextHash(h hash.Hash) hash.Hash {
return &canonicalTextHash{h, 0}
}
type canonicalTextHash struct {
h hash.Hash
s int
}
var newline = []byte{'\r', '\n'}
func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
start := 0
for i, c := range buf {
switch cth.s {
case 0:
if c == '\r' {
cth.s = 1
} else if c == '\n' {
cth.h.Write(buf[start:i])
cth.h.Write(newline)
start = i + 1
}
case 1:
cth.s = 0
}
}
cth.h.Write(buf[start:])
return len(buf), nil
}
func (cth *canonicalTextHash) Sum(in []byte) []byte {
return cth.h.Sum(in)
}
func (cth *canonicalTextHash) Reset() {
cth.h.Reset()
cth.s = 0
}
func (cth *canonicalTextHash) Size() int {
return cth.h.Size()
}
func (cth *canonicalTextHash) BlockSize() int {
return cth.h.BlockSize()
}
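A small sketch (not part of the vendored file) of the property this wrapper provides: text with "\n" and "\r\n" line endings hashes to the same digest, which is what RFC 4880 canonical text form requires.

// Example (not part of the vendored file): LF and CRLF line endings produce
// identical digests once wrapped in the canonical-text hash.
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/openpgp"
)

func main() {
	a := openpgp.NewCanonicalTextHash(sha256.New())
	a.Write([]byte("line one\nline two\n"))

	b := openpgp.NewCanonicalTextHash(sha256.New())
	b.Write([]byte("line one\r\nline two\r\n"))

	fmt.Println(bytes.Equal(a.Sum(nil), b.Sum(nil))) // true
}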

122
vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go generated vendored Normal file

@ -0,0 +1,122 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
// n. 4, 1985, pp. 469-472.
//
// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
// unsuitable for other protocols. RSA should be used in preference in any
// case.
package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
import (
"crypto/rand"
"crypto/subtle"
"errors"
"io"
"math/big"
)
// PublicKey represents an ElGamal public key.
type PublicKey struct {
G, P, Y *big.Int
}
// PrivateKey represents an ElGamal private key.
type PrivateKey struct {
PublicKey
X *big.Int
}
// Encrypt encrypts the given message to the given public key. The result is a
// pair of integers. Errors can result from reading random, or because msg is
// too large to be encrypted to the public key.
func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
pLen := (pub.P.BitLen() + 7) / 8
if len(msg) > pLen-11 {
err = errors.New("elgamal: message too long")
return
}
// EM = 0x02 || PS || 0x00 || M
em := make([]byte, pLen-1)
em[0] = 2
ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
err = nonZeroRandomBytes(ps, random)
if err != nil {
return
}
em[len(em)-len(msg)-1] = 0
copy(mm, msg)
m := new(big.Int).SetBytes(em)
k, err := rand.Int(random, pub.P)
if err != nil {
return
}
c1 = new(big.Int).Exp(pub.G, k, pub.P)
s := new(big.Int).Exp(pub.Y, k, pub.P)
c2 = s.Mul(s, m)
c2.Mod(c2, pub.P)
return
}
// Decrypt takes two integers, resulting from an ElGamal encryption, and
// returns the plaintext of the message. An error can result only if the
// ciphertext is invalid. Users should keep in mind that this is a padding
// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
// Bleichenbacher, Advances in Cryptology (Crypto '98).
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
s := new(big.Int).Exp(c1, priv.X, priv.P)
s.ModInverse(s, priv.P)
s.Mul(s, c2)
s.Mod(s, priv.P)
em := s.Bytes()
firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
// The remainder of the plaintext must be a string of non-zero random
// octets, followed by a 0, followed by the message.
// lookingForIndex: 1 iff we are still looking for the zero.
// index: the offset of the first zero byte.
var lookingForIndex, index int
lookingForIndex = 1
for i := 1; i < len(em); i++ {
equals0 := subtle.ConstantTimeByteEq(em[i], 0)
index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
}
if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
return nil, errors.New("elgamal: decryption error")
}
return em[index+1:], nil
}
// nonZeroRandomBytes fills the given slice with non-zero random octets.
func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
_, err = io.ReadFull(rand, s)
if err != nil {
return
}
for i := 0; i < len(s); i++ {
for s[i] == 0 {
_, err = io.ReadFull(rand, s[i:i+1])
if err != nil {
return
}
}
}
return
}
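An encrypt/decrypt round-trip sketch (not part of the vendored file). The 512-bit prime from rand.Prime and the generator g = 2 are toy assumptions that keep the example short; they are not a secure ElGamal group and only demonstrate the API shape.

// Example (not part of the vendored file): round trip with throwaway
// parameters. Do NOT use this parameter choice for real keys.
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"

	"golang.org/x/crypto/openpgp/elgamal"
)

func main() {
	p, err := rand.Prime(rand.Reader, 512) // toy modulus
	if err != nil {
		panic(err)
	}
	g := big.NewInt(2)
	x, _ := rand.Int(rand.Reader, p)  // private exponent
	y := new(big.Int).Exp(g, x, p)    // public value g^x mod p

	priv := &elgamal.PrivateKey{
		PublicKey: elgamal.PublicKey{G: g, P: p, Y: y},
		X:         x,
	}

	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, []byte("session key material"))
	if err != nil {
		panic(err)
	}
	msg, err := elgamal.Decrypt(priv, c1, c2)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", msg) // session key material
}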

72
vendor/golang.org/x/crypto/openpgp/errors/errors.go generated vendored Normal file

@ -0,0 +1,72 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errors contains common error types for the OpenPGP packages.
package errors // import "golang.org/x/crypto/openpgp/errors"
import (
"strconv"
)
// A StructuralError is returned when OpenPGP data is found to be syntactically
// invalid.
type StructuralError string
func (s StructuralError) Error() string {
return "openpgp: invalid data: " + string(s)
}
// UnsupportedError indicates that, although the OpenPGP data is valid, it
// makes use of currently unimplemented features.
type UnsupportedError string
func (s UnsupportedError) Error() string {
return "openpgp: unsupported feature: " + string(s)
}
// InvalidArgumentError indicates that the caller is in error and passed an
// incorrect value.
type InvalidArgumentError string
func (i InvalidArgumentError) Error() string {
return "openpgp: invalid argument: " + string(i)
}
// SignatureError indicates that a syntactically valid signature failed to
// validate.
type SignatureError string
func (b SignatureError) Error() string {
return "openpgp: invalid signature: " + string(b)
}
type keyIncorrectError int
func (ki keyIncorrectError) Error() string {
return "openpgp: incorrect key"
}
var ErrKeyIncorrect error = keyIncorrectError(0)
type unknownIssuerError int
func (unknownIssuerError) Error() string {
return "openpgp: signature made by unknown entity"
}
var ErrUnknownIssuer error = unknownIssuerError(0)
type keyRevokedError int
func (keyRevokedError) Error() string {
return "openpgp: signature made by revoked key"
}
var ErrKeyRevoked error = keyRevokedError(0)
type UnknownPacketTypeError uint8
func (upte UnknownPacketTypeError) Error() string {
return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
}
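A sketch (not part of the vendored file) of how callers typically branch on these error kinds, much as ReadKeyRing in keys.go does when deciding whether a broken key can be skipped.

// Example (not part of the vendored file): distinguishing the exported error
// types with a type switch.
package main

import (
	"fmt"

	"golang.org/x/crypto/openpgp/errors"
)

func describe(err error) string {
	switch err.(type) {
	case errors.StructuralError:
		return "malformed OpenPGP data: " + err.Error()
	case errors.UnsupportedError:
		return "valid but unsupported feature: " + err.Error()
	case errors.SignatureError:
		return "signature failed to verify: " + err.Error()
	default:
		return "other error: " + err.Error()
	}
}

func main() {
	fmt.Println(describe(errors.UnsupportedError("ECDH keys")))
	fmt.Println(describe(errors.StructuralError("truncated packet")))
}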

639
vendor/golang.org/x/crypto/openpgp/keys.go generated vendored Normal file

@ -0,0 +1,639 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package openpgp
import (
"crypto/rsa"
"io"
"time"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/packet"
)
// PublicKeyType is the armor type for a PGP public key.
var PublicKeyType = "PGP PUBLIC KEY BLOCK"
// PrivateKeyType is the armor type for a PGP private key.
var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
// An Entity represents the components of an OpenPGP key: a primary public key
// (which must be a signing key), one or more identities claimed by that key,
// and zero or more subkeys, which may be encryption keys.
type Entity struct {
PrimaryKey *packet.PublicKey
PrivateKey *packet.PrivateKey
Identities map[string]*Identity // indexed by Identity.Name
Revocations []*packet.Signature
Subkeys []Subkey
}
// An Identity represents an identity claimed by an Entity and zero or more
// assertions by other entities about that claim.
type Identity struct {
Name string // by convention, has the form "Full Name (comment) <email@example.com>"
UserId *packet.UserId
SelfSignature *packet.Signature
Signatures []*packet.Signature
}
// A Subkey is an additional public key in an Entity. Subkeys can be used for
// encryption.
type Subkey struct {
PublicKey *packet.PublicKey
PrivateKey *packet.PrivateKey
Sig *packet.Signature
}
// A Key identifies a specific public key in an Entity. This is either the
// Entity's primary key or a subkey.
type Key struct {
Entity *Entity
PublicKey *packet.PublicKey
PrivateKey *packet.PrivateKey
SelfSignature *packet.Signature
}
// A KeyRing provides access to public and private keys.
type KeyRing interface {
// KeysById returns the set of keys that have the given key id.
KeysById(id uint64) []Key
// KeysByIdUsage returns the set of keys with the given id
// that also meet the key usage given by requiredUsage.
// The requiredUsage is expressed as the bitwise-OR of
// packet.KeyFlag* values.
KeysByIdUsage(id uint64, requiredUsage byte) []Key
// DecryptionKeys returns all private keys that are valid for
// decryption.
DecryptionKeys() []Key
}
// primaryIdentity returns the Identity marked as primary or the first identity
// if none are so marked.
func (e *Entity) primaryIdentity() *Identity {
var firstIdentity *Identity
for _, ident := range e.Identities {
if firstIdentity == nil {
firstIdentity = ident
}
if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
return ident
}
}
return firstIdentity
}
// encryptionKey returns the best candidate Key for encrypting a message to the
// given Entity.
func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
candidateSubkey := -1
// Iterate the keys to find the newest key
var maxTime time.Time
for i, subkey := range e.Subkeys {
if subkey.Sig.FlagsValid &&
subkey.Sig.FlagEncryptCommunications &&
subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
!subkey.Sig.KeyExpired(now) &&
(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
candidateSubkey = i
maxTime = subkey.Sig.CreationTime
}
}
if candidateSubkey != -1 {
subkey := e.Subkeys[candidateSubkey]
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
}
// If we don't have any candidate subkeys for encryption and
// the primary key doesn't have any usage metadata then we
// assume that the primary key is ok. Or, if the primary key is
// marked as ok to encrypt to, then we can obviously use it.
i := e.primaryIdentity()
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
e.PrimaryKey.PubKeyAlgo.CanEncrypt() &&
!i.SelfSignature.KeyExpired(now) {
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
}
// This Entity appears to be signing only.
return Key{}, false
}
// signingKey returns the best candidate Key for signing a message with this
// Entity.
func (e *Entity) signingKey(now time.Time) (Key, bool) {
candidateSubkey := -1
for i, subkey := range e.Subkeys {
if subkey.Sig.FlagsValid &&
subkey.Sig.FlagSign &&
subkey.PublicKey.PubKeyAlgo.CanSign() &&
!subkey.Sig.KeyExpired(now) {
candidateSubkey = i
break
}
}
if candidateSubkey != -1 {
subkey := e.Subkeys[candidateSubkey]
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
}
// If we have no candidate subkey then we assume that it's ok to sign
// with the primary key.
i := e.primaryIdentity()
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign &&
!i.SelfSignature.KeyExpired(now) {
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
}
return Key{}, false
}
// An EntityList contains one or more Entities.
type EntityList []*Entity
// KeysById returns the set of keys that have the given key id.
func (el EntityList) KeysById(id uint64) (keys []Key) {
for _, e := range el {
if e.PrimaryKey.KeyId == id {
var selfSig *packet.Signature
for _, ident := range e.Identities {
if selfSig == nil {
selfSig = ident.SelfSignature
} else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
selfSig = ident.SelfSignature
break
}
}
keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
}
for _, subKey := range e.Subkeys {
if subKey.PublicKey.KeyId == id {
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
}
}
}
return
}
// KeysByIdUsage returns the set of keys with the given id that also meet
// the key usage given by requiredUsage. The requiredUsage is expressed as
// the bitwise-OR of packet.KeyFlag* values.
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
for _, key := range el.KeysById(id) {
if len(key.Entity.Revocations) > 0 {
continue
}
if key.SelfSignature.RevocationReason != nil {
continue
}
if key.SelfSignature.FlagsValid && requiredUsage != 0 {
var usage byte
if key.SelfSignature.FlagCertify {
usage |= packet.KeyFlagCertify
}
if key.SelfSignature.FlagSign {
usage |= packet.KeyFlagSign
}
if key.SelfSignature.FlagEncryptCommunications {
usage |= packet.KeyFlagEncryptCommunications
}
if key.SelfSignature.FlagEncryptStorage {
usage |= packet.KeyFlagEncryptStorage
}
if usage&requiredUsage != requiredUsage {
continue
}
}
keys = append(keys, key)
}
return
}
// DecryptionKeys returns all private keys that are valid for decryption.
func (el EntityList) DecryptionKeys() (keys []Key) {
for _, e := range el {
for _, subKey := range e.Subkeys {
if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
}
}
}
return
}
// ReadArmoredKeyRing reads one or more public/private keys from an armored keyring file.
func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
block, err := armor.Decode(r)
if err == io.EOF {
return nil, errors.InvalidArgumentError("no armored data found")
}
if err != nil {
return nil, err
}
if block.Type != PublicKeyType && block.Type != PrivateKeyType {
return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type)
}
return ReadKeyRing(block.Body)
}
// ReadKeyRing reads one or more public/private keys. Unsupported keys are
// ignored as long as at least a single valid key is found.
func ReadKeyRing(r io.Reader) (el EntityList, err error) {
packets := packet.NewReader(r)
var lastUnsupportedError error
for {
var e *Entity
e, err = ReadEntity(packets)
if err != nil {
// TODO: warn about skipped unsupported/unreadable keys
if _, ok := err.(errors.UnsupportedError); ok {
lastUnsupportedError = err
err = readToNextPublicKey(packets)
} else if _, ok := err.(errors.StructuralError); ok {
// Skip unreadable, badly-formatted keys
lastUnsupportedError = err
err = readToNextPublicKey(packets)
}
if err == io.EOF {
err = nil
break
}
if err != nil {
el = nil
break
}
} else {
el = append(el, e)
}
}
if len(el) == 0 && err == nil {
err = lastUnsupportedError
}
return
}
// readToNextPublicKey reads packets until the start of the entity and leaves
// the first packet of the new entity in the Reader.
func readToNextPublicKey(packets *packet.Reader) (err error) {
var p packet.Packet
for {
p, err = packets.Next()
if err == io.EOF {
return
} else if err != nil {
if _, ok := err.(errors.UnsupportedError); ok {
err = nil
continue
}
return
}
if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey {
packets.Unread(p)
return
}
}
panic("unreachable")
}
// ReadEntity reads an entity (public key, identities, subkeys etc) from the
// given Reader.
func ReadEntity(packets *packet.Reader) (*Entity, error) {
e := new(Entity)
e.Identities = make(map[string]*Identity)
p, err := packets.Next()
if err != nil {
return nil, err
}
var ok bool
if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
packets.Unread(p)
return nil, errors.StructuralError("first packet was not a public/private key")
} else {
e.PrimaryKey = &e.PrivateKey.PublicKey
}
}
if !e.PrimaryKey.PubKeyAlgo.CanSign() {
return nil, errors.StructuralError("primary key cannot be used for signatures")
}
var current *Identity
var revocations []*packet.Signature
EachPacket:
for {
p, err := packets.Next()
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
switch pkt := p.(type) {
case *packet.UserId:
current = new(Identity)
current.Name = pkt.Id
current.UserId = pkt
e.Identities[pkt.Id] = current
for {
p, err = packets.Next()
if err == io.EOF {
return nil, io.ErrUnexpectedEOF
} else if err != nil {
return nil, err
}
sig, ok := p.(*packet.Signature)
if !ok {
return nil, errors.StructuralError("user ID packet not followed by self-signature")
}
if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error())
}
current.SelfSignature = sig
break
}
current.Signatures = append(current.Signatures, sig)
}
case *packet.Signature:
if pkt.SigType == packet.SigTypeKeyRevocation {
revocations = append(revocations, pkt)
} else if pkt.SigType == packet.SigTypeDirectSignature {
// TODO: RFC4880 5.2.1 permits signatures
// directly on keys (eg. to bind additional
// revocation keys).
} else if current == nil {
return nil, errors.StructuralError("signature packet found before user id packet")
} else {
current.Signatures = append(current.Signatures, pkt)
}
case *packet.PrivateKey:
if pkt.IsSubkey == false {
packets.Unread(p)
break EachPacket
}
err = addSubkey(e, packets, &pkt.PublicKey, pkt)
if err != nil {
return nil, err
}
case *packet.PublicKey:
if pkt.IsSubkey == false {
packets.Unread(p)
break EachPacket
}
err = addSubkey(e, packets, pkt, nil)
if err != nil {
return nil, err
}
default:
// we ignore unknown packets
}
}
if len(e.Identities) == 0 {
return nil, errors.StructuralError("entity without any identities")
}
for _, revocation := range revocations {
err = e.PrimaryKey.VerifyRevocationSignature(revocation)
if err == nil {
e.Revocations = append(e.Revocations, revocation)
} else {
// TODO: RFC 4880 5.2.3.15 defines revocation keys.
return nil, errors.StructuralError("revocation signature signed by alternate key")
}
}
return e, nil
}
func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
var subKey Subkey
subKey.PublicKey = pub
subKey.PrivateKey = priv
p, err := packets.Next()
if err == io.EOF {
return io.ErrUnexpectedEOF
}
if err != nil {
return errors.StructuralError("subkey signature invalid: " + err.Error())
}
var ok bool
subKey.Sig, ok = p.(*packet.Signature)
if !ok {
return errors.StructuralError("subkey packet not followed by signature")
}
if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation {
return errors.StructuralError("subkey signature with wrong type")
}
err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig)
if err != nil {
return errors.StructuralError("subkey signature invalid: " + err.Error())
}
e.Subkeys = append(e.Subkeys, subKey)
return nil
}
const defaultRSAKeyBits = 2048
// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
// single identity composed of the given full name, comment and email, any of
// which may be empty but must not contain any of "()<>\x00".
// If config is nil, sensible defaults will be used.
func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
currentTime := config.Now()
bits := defaultRSAKeyBits
if config != nil && config.RSABits != 0 {
bits = config.RSABits
}
uid := packet.NewUserId(name, comment, email)
if uid == nil {
return nil, errors.InvalidArgumentError("user id field contained invalid characters")
}
signingPriv, err := rsa.GenerateKey(config.Random(), bits)
if err != nil {
return nil, err
}
encryptingPriv, err := rsa.GenerateKey(config.Random(), bits)
if err != nil {
return nil, err
}
e := &Entity{
PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey),
PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv),
Identities: make(map[string]*Identity),
}
isPrimaryId := true
e.Identities[uid.Id] = &Identity{
Name: uid.Name,
UserId: uid,
SelfSignature: &packet.Signature{
CreationTime: currentTime,
SigType: packet.SigTypePositiveCert,
PubKeyAlgo: packet.PubKeyAlgoRSA,
Hash: config.Hash(),
IsPrimaryId: &isPrimaryId,
FlagsValid: true,
FlagSign: true,
FlagCertify: true,
IssuerKeyId: &e.PrimaryKey.KeyId,
},
}
// If the user passes in a DefaultHash via packet.Config,
// set the PreferredHash for the SelfSignature.
if config != nil && config.DefaultHash != 0 {
e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
}
e.Subkeys = make([]Subkey, 1)
e.Subkeys[0] = Subkey{
PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv),
Sig: &packet.Signature{
CreationTime: currentTime,
SigType: packet.SigTypeSubkeyBinding,
PubKeyAlgo: packet.PubKeyAlgoRSA,
Hash: config.Hash(),
FlagsValid: true,
FlagEncryptStorage: true,
FlagEncryptCommunications: true,
IssuerKeyId: &e.PrimaryKey.KeyId,
},
}
e.Subkeys[0].PublicKey.IsSubkey = true
e.Subkeys[0].PrivateKey.IsSubkey = true
return e, nil
}
// SerializePrivate serializes an Entity, including private key material, to
// the given Writer. For now, it must only be used on an Entity returned from
// NewEntity.
// If config is nil, sensible defaults will be used.
func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
err = e.PrivateKey.Serialize(w)
if err != nil {
return
}
for _, ident := range e.Identities {
err = ident.UserId.Serialize(w)
if err != nil {
return
}
err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
if err != nil {
return
}
err = ident.SelfSignature.Serialize(w)
if err != nil {
return
}
}
for _, subkey := range e.Subkeys {
err = subkey.PrivateKey.Serialize(w)
if err != nil {
return
}
err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
if err != nil {
return
}
err = subkey.Sig.Serialize(w)
if err != nil {
return
}
}
return nil
}
// Serialize writes the public part of the given Entity to w. (No private
// key material will be output).
func (e *Entity) Serialize(w io.Writer) error {
err := e.PrimaryKey.Serialize(w)
if err != nil {
return err
}
for _, ident := range e.Identities {
err = ident.UserId.Serialize(w)
if err != nil {
return err
}
err = ident.SelfSignature.Serialize(w)
if err != nil {
return err
}
for _, sig := range ident.Signatures {
err = sig.Serialize(w)
if err != nil {
return err
}
}
}
for _, subkey := range e.Subkeys {
err = subkey.PublicKey.Serialize(w)
if err != nil {
return err
}
err = subkey.Sig.Serialize(w)
if err != nil {
return err
}
}
return nil
}
// SignIdentity adds a signature to e, from signer, attesting that identity is
// associated with e. The provided identity must already be an element of
// e.Identities and the private key of signer must have been decrypted if
// necessary.
// If config is nil, sensible defaults will be used.
func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
if signer.PrivateKey == nil {
return errors.InvalidArgumentError("signing Entity must have a private key")
}
if signer.PrivateKey.Encrypted {
return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
}
ident, ok := e.Identities[identity]
if !ok {
return errors.InvalidArgumentError("given identity string not found in Entity")
}
sig := &packet.Signature{
SigType: packet.SigTypeGenericCert,
PubKeyAlgo: signer.PrivateKey.PubKeyAlgo,
Hash: config.Hash(),
CreationTime: config.Now(),
IssuerKeyId: &signer.PrivateKey.KeyId,
}
if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil {
return err
}
ident.Signatures = append(ident.Signatures, sig)
return nil
}
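A usage sketch (not part of the vendored file): generating a fresh Entity and exporting its armored public key. The names, the ioutil.Discard target and the ordering are assumptions of the example; SerializePrivate is called first because it is what signs the self-signatures that the public Serialize then writes.

// Example (not part of the vendored file): generate a key pair and print the
// armored public key block on stdout.
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	entity, err := openpgp.NewEntity("Alice Example", "demo key", "alice@example.org", nil)
	if err != nil {
		panic(err)
	}
	// Signing the self-signatures happens as a side effect of SerializePrivate.
	if err := entity.SerializePrivate(ioutil.Discard, nil); err != nil {
		panic(err)
	}

	w, err := armor.Encode(os.Stdout, openpgp.PublicKeyType, nil)
	if err != nil {
		panic(err)
	}
	if err := entity.Serialize(w); err != nil {
		panic(err)
	}
	w.Close()
	fmt.Fprintln(os.Stderr, "exported entity with", len(entity.Subkeys), "subkey(s)")
}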

123
vendor/golang.org/x/crypto/openpgp/packet/compressed.go generated vendored Normal file

@ -0,0 +1,123 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"compress/bzip2"
"compress/flate"
"compress/zlib"
"golang.org/x/crypto/openpgp/errors"
"io"
"strconv"
)
// Compressed represents a compressed OpenPGP packet. The decompressed contents
// will contain more OpenPGP packets. See RFC 4880, section 5.6.
type Compressed struct {
Body io.Reader
}
const (
NoCompression = flate.NoCompression
BestSpeed = flate.BestSpeed
BestCompression = flate.BestCompression
DefaultCompression = flate.DefaultCompression
)
// CompressionConfig contains compressor configuration settings.
type CompressionConfig struct {
// Level is the compression level to use. It must be set to
// between -1 and 9, with -1 causing the compressor to use the
// default compression level, 0 causing the compressor to use
// no compression and 1 to 9 representing increasing (better,
// slower) compression levels. If Level is less than -1 or
// more than 9, a non-nil error will be returned during
// encryption. See the constants above for convenient common
// settings for Level.
Level int
}
func (c *Compressed) parse(r io.Reader) error {
var buf [1]byte
_, err := readFull(r, buf[:])
if err != nil {
return err
}
switch buf[0] {
case 1:
c.Body = flate.NewReader(r)
case 2:
c.Body, err = zlib.NewReader(r)
case 3:
c.Body = bzip2.NewReader(r)
default:
err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
}
return err
}
// compressedWriteCloser represents the serialized compression stream
// header and the compressor. Its Close() method ensures that both the
// compressor and serialized stream header are closed. Its Write()
// method writes to the compressor.
type compressedWriteCloser struct {
sh io.Closer // Stream Header
c io.WriteCloser // Compressor
}
func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
return cwc.c.Write(p)
}
func (cwc compressedWriteCloser) Close() (err error) {
err = cwc.c.Close()
if err != nil {
return err
}
return cwc.sh.Close()
}
// SerializeCompressed serializes a compressed data packet to w and
// returns a WriteCloser to which the literal data packets themselves
// can be written and which MUST be closed on completion. If cc is
// nil, sensible defaults will be used to configure the compression
// algorithm.
func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
compressed, err := serializeStreamHeader(w, packetTypeCompressed)
if err != nil {
return
}
_, err = compressed.Write([]byte{uint8(algo)})
if err != nil {
return
}
level := DefaultCompression
if cc != nil {
level = cc.Level
}
var compressor io.WriteCloser
switch algo {
case CompressionZIP:
compressor, err = flate.NewWriter(compressed, level)
case CompressionZLIB:
compressor, err = zlib.NewWriterLevel(compressed, level)
default:
s := strconv.Itoa(int(algo))
err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
}
if err != nil {
return
}
literaldata = compressedWriteCloser{compressed, compressor}
return
}
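A round-trip sketch (not part of the vendored file) for the compressed packet type. The nopCloser wrapper is an assumption of the example, needed only because SerializeCompressed takes an io.WriteCloser; packet.Read then parses the result back into a *Compressed.

// Example (not part of the vendored file): write bytes through a compressed
// packet and read them back.
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"golang.org/x/crypto/openpgp/packet"
)

type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	w, err := packet.SerializeCompressed(nopCloser{&buf}, packet.CompressionZLIB, nil)
	if err != nil {
		panic(err)
	}
	io.WriteString(w, "compress me")
	w.Close() // flushes the compressor and finalizes the packet framing

	p, err := packet.Read(&buf)
	if err != nil {
		panic(err)
	}
	c, ok := p.(*packet.Compressed)
	if !ok {
		panic("expected a compressed packet")
	}
	body, _ := ioutil.ReadAll(c.Body)
	fmt.Printf("%s\n", body) // compress me
}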

91
vendor/golang.org/x/crypto/openpgp/packet/config.go generated vendored Normal file

@ -0,0 +1,91 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"crypto/rand"
"io"
"time"
)
// Config collects a number of parameters along with sensible defaults.
// A nil *Config is valid and results in all default values.
type Config struct {
// Rand provides the source of entropy.
// If nil, the crypto/rand Reader is used.
Rand io.Reader
// DefaultHash is the default hash function to be used.
// If zero, SHA-256 is used.
DefaultHash crypto.Hash
// DefaultCipher is the cipher to be used.
// If zero, AES-128 is used.
DefaultCipher CipherFunction
// Time returns the current time as the number of seconds since the
// epoch. If Time is nil, time.Now is used.
Time func() time.Time
// DefaultCompressionAlgo is the compression algorithm to be
// applied to the plaintext before encryption. If zero, no
// compression is done.
DefaultCompressionAlgo CompressionAlgo
// CompressionConfig configures the compression settings.
CompressionConfig *CompressionConfig
// S2KCount is only used for symmetric encryption. It
// determines the strength of the passphrase stretching when
// the said passphrase is hashed to produce a key. S2KCount
// should be between 1024 and 65011712, inclusive. If Config
// is nil or S2KCount is 0, the value 65536 is used. Not all
// values in the above range can be represented. S2KCount will
// be rounded up to the next representable value if it cannot
// be encoded exactly. When set, it is strongly encouraged to
// use a value that is at least 65536. See RFC 4880 Section
// 3.7.1.3.
S2KCount int
// RSABits is the number of bits in new RSA keys made with NewEntity.
// If zero, then 2048 bit keys are created.
RSABits int
}
func (c *Config) Random() io.Reader {
if c == nil || c.Rand == nil {
return rand.Reader
}
return c.Rand
}
func (c *Config) Hash() crypto.Hash {
if c == nil || uint(c.DefaultHash) == 0 {
return crypto.SHA256
}
return c.DefaultHash
}
func (c *Config) Cipher() CipherFunction {
if c == nil || uint8(c.DefaultCipher) == 0 {
return CipherAES128
}
return c.DefaultCipher
}
func (c *Config) Now() time.Time {
if c == nil || c.Time == nil {
return time.Now()
}
return c.Time()
}
func (c *Config) Compression() CompressionAlgo {
if c == nil {
return CompressionNone
}
return c.DefaultCompressionAlgo
}
func (c *Config) PasswordHashIterations() int {
if c == nil || c.S2KCount == 0 {
return 0
}
return c.S2KCount
}
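A short sketch (not part of the vendored file) confirming that a nil *Config is a valid receiver and that each accessor falls back to its documented default.

// Example (not part of the vendored file): all Config accessors tolerate a
// nil receiver and report the defaults described above.
package main

import (
	"crypto"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	var cfg *packet.Config // deliberately nil
	fmt.Println(cfg.Hash() == crypto.SHA256)                 // true: default hash
	fmt.Println(cfg.Cipher() == packet.CipherAES128)         // true: default cipher
	fmt.Println(cfg.Compression() == packet.CompressionNone) // true: no compression
	fmt.Println(cfg.Now())                                   // falls back to time.Now()
}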

199
vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go generated vendored Normal file

@ -0,0 +1,199 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto/rsa"
"encoding/binary"
"io"
"math/big"
"strconv"
"golang.org/x/crypto/openpgp/elgamal"
"golang.org/x/crypto/openpgp/errors"
)
const encryptedKeyVersion = 3
// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
// section 5.1.
type EncryptedKey struct {
KeyId uint64
Algo PublicKeyAlgorithm
CipherFunc CipherFunction // only valid after a successful Decrypt
Key []byte // only valid after a successful Decrypt
encryptedMPI1, encryptedMPI2 parsedMPI
}
func (e *EncryptedKey) parse(r io.Reader) (err error) {
var buf [10]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
if buf[0] != encryptedKeyVersion {
return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
}
e.KeyId = binary.BigEndian.Uint64(buf[1:9])
e.Algo = PublicKeyAlgorithm(buf[9])
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
case PubKeyAlgoElGamal:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
if err != nil {
return
}
e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
}
_, err = consumeAll(r)
return
}
func checksumKeyMaterial(key []byte) uint16 {
var checksum uint16
for _, v := range key {
checksum += uint16(v)
}
return checksum
}
// Decrypt decrypts an encrypted session key with the given private key. The
// private key must have been decrypted first.
// If config is nil, sensible defaults will be used.
func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
var err error
var b []byte
// TODO(agl): use session key decryption routines here to avoid
// padding oracle attacks.
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1.bytes)
case PubKeyAlgoElGamal:
c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
default:
err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
}
if err != nil {
return err
}
e.CipherFunc = CipherFunction(b[0])
e.Key = b[1 : len(b)-2]
expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
checksum := checksumKeyMaterial(e.Key)
if checksum != expectedChecksum {
return errors.StructuralError("EncryptedKey checksum incorrect")
}
return nil
}
// Serialize writes the encrypted key packet, e, to w.
func (e *EncryptedKey) Serialize(w io.Writer) error {
var mpiLen int
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
mpiLen = 2 + len(e.encryptedMPI1.bytes)
case PubKeyAlgoElGamal:
mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes)
default:
return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
}
serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen)
w.Write([]byte{encryptedKeyVersion})
binary.Write(w, binary.BigEndian, e.KeyId)
w.Write([]byte{byte(e.Algo)})
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
writeMPIs(w, e.encryptedMPI1)
case PubKeyAlgoElGamal:
writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2)
default:
panic("internal error")
}
return nil
}
// SerializeEncryptedKey serializes an encrypted key packet to w that contains
// key, encrypted to pub.
// If config is nil, sensible defaults will be used.
func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
var buf [10]byte
buf[0] = encryptedKeyVersion
binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
buf[9] = byte(pub.PubKeyAlgo)
keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */)
keyBlock[0] = byte(cipherFunc)
copy(keyBlock[1:], key)
checksum := checksumKeyMaterial(key)
keyBlock[1+len(key)] = byte(checksum >> 8)
keyBlock[1+len(key)+1] = byte(checksum)
switch pub.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock)
case PubKeyAlgoElGamal:
return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
}
return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
}
func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
if err != nil {
return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
}
packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText)
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
if err != nil {
return err
}
_, err = w.Write(header[:])
if err != nil {
return err
}
return writeMPI(w, 8*uint16(len(cipherText)), cipherText)
}
func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
if err != nil {
return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
}
packetLen := 10 /* header length */
packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
if err != nil {
return err
}
_, err = w.Write(header[:])
if err != nil {
return err
}
err = writeBig(w, c1)
if err != nil {
return err
}
return writeBig(w, c2)
}
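
Usage sketch (the helper name and arguments are illustrative): encrypting a session key to a recipient's public key is a single call to SerializeEncryptedKey; a nil *Config selects crypto/rand and the other defaults from config.go.

package example

import (
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

// writeSessionKey encrypts sessionKey to pub and writes the resulting
// public-key encrypted session key packet (tag 1) to w.
func writeSessionKey(w io.Writer, pub *packet.PublicKey, sessionKey []byte) error {
	return packet.SerializeEncryptedKey(w, pub, packet.CipherAES128, sessionKey, nil)
}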

89
vendor/golang.org/x/crypto/openpgp/packet/literal.go generated vendored Normal file
View File

@ -0,0 +1,89 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"encoding/binary"
"io"
)
// LiteralData represents an encrypted file. See RFC 4880, section 5.9.
type LiteralData struct {
IsBinary bool
FileName string
Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
Body io.Reader
}
// ForEyesOnly returns whether the contents of the LiteralData have been marked
// as especially sensitive.
func (l *LiteralData) ForEyesOnly() bool {
return l.FileName == "_CONSOLE"
}
func (l *LiteralData) parse(r io.Reader) (err error) {
var buf [256]byte
_, err = readFull(r, buf[:2])
if err != nil {
return
}
l.IsBinary = buf[0] == 'b'
fileNameLen := int(buf[1])
_, err = readFull(r, buf[:fileNameLen])
if err != nil {
return
}
l.FileName = string(buf[:fileNameLen])
_, err = readFull(r, buf[:4])
if err != nil {
return
}
l.Time = binary.BigEndian.Uint32(buf[:4])
l.Body = r
return
}
// SerializeLiteral serializes a literal data packet to w and returns a
// WriteCloser to which the data itself can be written and which MUST be closed
// on completion. The fileName is truncated to 255 bytes.
func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
var buf [4]byte
buf[0] = 't'
if isBinary {
buf[0] = 'b'
}
if len(fileName) > 255 {
fileName = fileName[:255]
}
buf[1] = byte(len(fileName))
inner, err := serializeStreamHeader(w, packetTypeLiteralData)
if err != nil {
return
}
_, err = inner.Write(buf[:2])
if err != nil {
return
}
_, err = inner.Write([]byte(fileName))
if err != nil {
return
}
binary.BigEndian.PutUint32(buf[:], time)
_, err = inner.Write(buf[:])
if err != nil {
return
}
plaintext = inner
return
}
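
Usage sketch (helper and file name are illustrative): SerializeLiteral returns a WriteCloser for the payload, and Close must be called so the final length octet of the partial-length stream is written.

package example

import (
	"io"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

// writeLiteral wraps data in a literal data packet (tag 11) written to w.
func writeLiteral(w io.WriteCloser, data []byte) error {
	plaintext, err := packet.SerializeLiteral(w, true /* binary */, "data.bin", uint32(time.Now().Unix()))
	if err != nil {
		return err
	}
	if _, err := plaintext.Write(data); err != nil {
		return err
	}
	return plaintext.Close() // emits the final length octet
}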

143
vendor/golang.org/x/crypto/openpgp/packet/ocfb.go generated vendored Normal file
View File

@ -0,0 +1,143 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
package packet
import (
"crypto/cipher"
)
type ocfbEncrypter struct {
b cipher.Block
fre []byte
outUsed int
}
// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
// performed.
type OCFBResyncOption bool
const (
OCFBResync OCFBResyncOption = true
OCFBNoResync OCFBResyncOption = false
)
// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
// cipher feedback mode using the given cipher.Block, and an initial amount of
// ciphertext. randData must be random bytes and be the same length as the
// cipher.Block's block size. Resync determines if the "resynchronization step"
// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
// this point.
func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) {
blockSize := block.BlockSize()
if len(randData) != blockSize {
return nil, nil
}
x := &ocfbEncrypter{
b: block,
fre: make([]byte, blockSize),
outUsed: 0,
}
prefix := make([]byte, blockSize+2)
block.Encrypt(x.fre, x.fre)
for i := 0; i < blockSize; i++ {
prefix[i] = randData[i] ^ x.fre[i]
}
block.Encrypt(x.fre, prefix[:blockSize])
prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
if resync {
block.Encrypt(x.fre, prefix[2:])
} else {
x.fre[0] = prefix[blockSize]
x.fre[1] = prefix[blockSize+1]
x.outUsed = 2
}
return x, prefix
}
func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
for i := 0; i < len(src); i++ {
if x.outUsed == len(x.fre) {
x.b.Encrypt(x.fre, x.fre)
x.outUsed = 0
}
x.fre[x.outUsed] ^= src[i]
dst[i] = x.fre[x.outUsed]
x.outUsed++
}
}
type ocfbDecrypter struct {
b cipher.Block
fre []byte
outUsed int
}
// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's
// cipher feedback mode using the given cipher.Block. Prefix must be the first
// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's
// block size. If an incorrect key is detected then nil is returned. On
// successful exit, blockSize+2 bytes of decrypted data are written into
// prefix. Resync determines if the "resynchronization step" from RFC 4880,
// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point.
func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream {
blockSize := block.BlockSize()
if len(prefix) != blockSize+2 {
return nil
}
x := &ocfbDecrypter{
b: block,
fre: make([]byte, blockSize),
outUsed: 0,
}
prefixCopy := make([]byte, len(prefix))
copy(prefixCopy, prefix)
block.Encrypt(x.fre, x.fre)
for i := 0; i < blockSize; i++ {
prefixCopy[i] ^= x.fre[i]
}
block.Encrypt(x.fre, prefix[:blockSize])
prefixCopy[blockSize] ^= x.fre[0]
prefixCopy[blockSize+1] ^= x.fre[1]
if prefixCopy[blockSize-2] != prefixCopy[blockSize] ||
prefixCopy[blockSize-1] != prefixCopy[blockSize+1] {
return nil
}
if resync {
block.Encrypt(x.fre, prefix[2:])
} else {
x.fre[0] = prefix[blockSize]
x.fre[1] = prefix[blockSize+1]
x.outUsed = 2
}
copy(prefix, prefixCopy)
return x
}
func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
for i := 0; i < len(src); i++ {
if x.outUsed == len(x.fre) {
x.b.Encrypt(x.fre, x.fre)
x.outUsed = 0
}
c := src[i]
dst[i] = x.fre[x.outUsed] ^ src[i]
x.fre[x.outUsed] = c
x.outUsed++
}
}
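
Round-trip sketch of the OCFB helpers above with AES (function name and inputs are illustrative); the decrypter returns nil when the prefix check indicates a wrong key.

package example

import (
	"bytes"
	"crypto/aes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

// ocfbRoundTrip encrypts msg with OpenPGP CFB mode and checks that it decrypts.
func ocfbRoundTrip(key, msg []byte) error {
	block, err := aes.NewCipher(key) // key must be 16, 24 or 32 bytes long
	if err != nil {
		return err
	}
	iv := make([]byte, block.BlockSize()) // one block of fresh random data
	if _, err := rand.Read(iv); err != nil {
		return err
	}
	enc, prefix := packet.NewOCFBEncrypter(block, iv, packet.OCFBResync)
	ct := make([]byte, len(msg))
	enc.XORKeyStream(ct, msg)

	dec := packet.NewOCFBDecrypter(block, prefix, packet.OCFBResync)
	if dec == nil {
		return fmt.Errorf("ocfb: incorrect key") // prefix check failed
	}
	pt := make([]byte, len(ct))
	dec.XORKeyStream(pt, ct)
	if !bytes.Equal(pt, msg) {
		return fmt.Errorf("ocfb: round trip mismatch")
	}
	return nil
}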

73
vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"encoding/binary"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/s2k"
"io"
"strconv"
)
// OnePassSignature represents a one-pass signature packet. See RFC 4880,
// section 5.4.
type OnePassSignature struct {
SigType SignatureType
Hash crypto.Hash
PubKeyAlgo PublicKeyAlgorithm
KeyId uint64
IsLast bool
}
const onePassSignatureVersion = 3
func (ops *OnePassSignature) parse(r io.Reader) (err error) {
var buf [13]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
if buf[0] != onePassSignatureVersion {
err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
}
var ok bool
ops.Hash, ok = s2k.HashIdToHash(buf[2])
if !ok {
return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
}
ops.SigType = SignatureType(buf[1])
ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
ops.KeyId = binary.BigEndian.Uint64(buf[4:12])
ops.IsLast = buf[12] != 0
return
}
// Serialize marshals the given OnePassSignature to w.
func (ops *OnePassSignature) Serialize(w io.Writer) error {
var buf [13]byte
buf[0] = onePassSignatureVersion
buf[1] = uint8(ops.SigType)
var ok bool
buf[2], ok = s2k.HashToHashId(ops.Hash)
if !ok {
return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
}
buf[3] = uint8(ops.PubKeyAlgo)
binary.BigEndian.PutUint64(buf[4:12], ops.KeyId)
if ops.IsLast {
buf[12] = 1
}
if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil {
return err
}
_, err := w.Write(buf[:])
return err
}
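
Usage sketch (helper name and key ID are illustrative): a one-pass signature packet announces the hash, algorithm and key of a signature that will follow the signed data.

package example

import (
	"crypto"
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

// writeOnePassSig announces an upcoming SHA-256/RSA signature over binary data.
func writeOnePassSig(w io.Writer, keyID uint64) error {
	ops := &packet.OnePassSignature{
		SigType:    packet.SigTypeBinary,
		Hash:       crypto.SHA256,
		PubKeyAlgo: packet.PubKeyAlgoRSA,
		KeyId:      keyID,
		IsLast:     true, // no further one-pass signatures follow
	}
	return ops.Serialize(w)
}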

162
vendor/golang.org/x/crypto/openpgp/packet/opaque.go generated vendored Normal file
View File

@ -0,0 +1,162 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"io"
"io/ioutil"
"golang.org/x/crypto/openpgp/errors"
)
// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is
// useful for splitting and storing the original packet contents separately,
// handling unsupported packet types or accessing parts of the packet not yet
// implemented by this package.
type OpaquePacket struct {
// Packet type
Tag uint8
// Reason why the packet was parsed opaquely
Reason error
// Binary contents of the packet data
Contents []byte
}
func (op *OpaquePacket) parse(r io.Reader) (err error) {
op.Contents, err = ioutil.ReadAll(r)
return
}
// Serialize marshals the packet to a writer in its original form, including
// the packet header.
func (op *OpaquePacket) Serialize(w io.Writer) (err error) {
err = serializeHeader(w, packetType(op.Tag), len(op.Contents))
if err == nil {
_, err = w.Write(op.Contents)
}
return
}
// Parse attempts to parse the opaque contents into a structure supported by
// this package. If the packet is not known then the result will be another
// OpaquePacket.
func (op *OpaquePacket) Parse() (p Packet, err error) {
hdr := bytes.NewBuffer(nil)
err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents))
if err != nil {
op.Reason = err
return op, err
}
p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents)))
if err != nil {
op.Reason = err
p = op
}
return
}
// OpaqueReader reads OpaquePackets from an io.Reader.
type OpaqueReader struct {
r io.Reader
}
func NewOpaqueReader(r io.Reader) *OpaqueReader {
return &OpaqueReader{r: r}
}
// Next reads the next OpaquePacket.
func (or *OpaqueReader) Next() (op *OpaquePacket, err error) {
tag, _, contents, err := readHeader(or.r)
if err != nil {
return
}
op = &OpaquePacket{Tag: uint8(tag), Reason: err}
err = op.parse(contents)
if err != nil {
consumeAll(contents)
}
return
}
// OpaqueSubpacket represents an unparsed OpenPGP subpacket,
// as found in signature and user attribute packets.
type OpaqueSubpacket struct {
SubType uint8
Contents []byte
}
// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from
// their byte representation.
func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) {
var (
subHeaderLen int
subPacket *OpaqueSubpacket
)
for len(contents) > 0 {
subHeaderLen, subPacket, err = nextSubpacket(contents)
if err != nil {
break
}
result = append(result, subPacket)
contents = contents[subHeaderLen+len(subPacket.Contents):]
}
return
}
func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) {
// RFC 4880, section 5.2.3.1
var subLen uint32
if len(contents) < 1 {
goto Truncated
}
subPacket = &OpaqueSubpacket{}
switch {
case contents[0] < 192:
subHeaderLen = 2 // 1 length byte, 1 subtype byte
if len(contents) < subHeaderLen {
goto Truncated
}
subLen = uint32(contents[0])
contents = contents[1:]
case contents[0] < 255:
subHeaderLen = 3 // 2 length bytes, 1 subtype
if len(contents) < subHeaderLen {
goto Truncated
}
subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192
contents = contents[2:]
default:
subHeaderLen = 6 // 5 length bytes, 1 subtype
if len(contents) < subHeaderLen {
goto Truncated
}
subLen = uint32(contents[1])<<24 |
uint32(contents[2])<<16 |
uint32(contents[3])<<8 |
uint32(contents[4])
contents = contents[5:]
}
if subLen > uint32(len(contents)) || subLen == 0 {
goto Truncated
}
subPacket.SubType = contents[0]
subPacket.Contents = contents[1:subLen]
return
Truncated:
err = errors.StructuralError("subpacket truncated")
return
}
func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) {
buf := make([]byte, 6)
n := serializeSubpacketLength(buf, len(osp.Contents)+1)
buf[n] = osp.SubType
if _, err = w.Write(buf[:n+1]); err != nil {
return
}
_, err = w.Write(osp.Contents)
return
}
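
Usage sketch (listTags is illustrative): OpaqueReader walks a packet stream without interpreting packet bodies, which is handy for inspecting unsupported packet types.

package example

import (
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

// listTags returns the tag of every packet in r, leaving the contents unparsed.
func listTags(r io.Reader) ([]uint8, error) {
	var tags []uint8
	or := packet.NewOpaqueReader(r)
	for {
		op, err := or.Next()
		if err == io.EOF {
			return tags, nil
		}
		if err != nil {
			return nil, err
		}
		tags = append(tags, op.Tag)
	}
}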

539
vendor/golang.org/x/crypto/openpgp/packet/packet.go generated vendored Normal file
View File

@ -0,0 +1,539 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package packet implements parsing and serialization of OpenPGP packets, as
// specified in RFC 4880.
package packet // import "golang.org/x/crypto/openpgp/packet"
import (
"bufio"
"crypto/aes"
"crypto/cipher"
"crypto/des"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/openpgp/errors"
"io"
"math/big"
)
// readFull is the same as io.ReadFull except that reading zero bytes returns
// ErrUnexpectedEOF rather than EOF.
func readFull(r io.Reader, buf []byte) (n int, err error) {
n, err = io.ReadFull(r, buf)
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
func readLength(r io.Reader) (length int64, isPartial bool, err error) {
var buf [4]byte
_, err = readFull(r, buf[:1])
if err != nil {
return
}
switch {
case buf[0] < 192:
length = int64(buf[0])
case buf[0] < 224:
length = int64(buf[0]-192) << 8
_, err = readFull(r, buf[0:1])
if err != nil {
return
}
length += int64(buf[0]) + 192
case buf[0] < 255:
length = int64(1) << (buf[0] & 0x1f)
isPartial = true
default:
_, err = readFull(r, buf[0:4])
if err != nil {
return
}
length = int64(buf[0])<<24 |
int64(buf[1])<<16 |
int64(buf[2])<<8 |
int64(buf[3])
}
return
}
// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths.
// The continuation lengths are parsed and removed from the stream and EOF is
// returned at the end of the packet. See RFC 4880, section 4.2.2.4.
type partialLengthReader struct {
r io.Reader
remaining int64
isPartial bool
}
func (r *partialLengthReader) Read(p []byte) (n int, err error) {
for r.remaining == 0 {
if !r.isPartial {
return 0, io.EOF
}
r.remaining, r.isPartial, err = readLength(r.r)
if err != nil {
return 0, err
}
}
toRead := int64(len(p))
if toRead > r.remaining {
toRead = r.remaining
}
n, err = r.r.Read(p[:int(toRead)])
r.remaining -= int64(n)
if n < int(toRead) && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// partialLengthWriter writes a stream of data using OpenPGP partial lengths.
// See RFC 4880, section 4.2.2.4.
type partialLengthWriter struct {
w io.WriteCloser
lengthByte [1]byte
}
func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
for len(p) > 0 {
for power := uint(14); power < 32; power-- {
l := 1 << power
if len(p) >= l {
w.lengthByte[0] = 224 + uint8(power)
_, err = w.w.Write(w.lengthByte[:])
if err != nil {
return
}
var m int
m, err = w.w.Write(p[:l])
n += m
if err != nil {
return
}
p = p[l:]
break
}
}
}
return
}
func (w *partialLengthWriter) Close() error {
w.lengthByte[0] = 0
_, err := w.w.Write(w.lengthByte[:])
if err != nil {
return err
}
return w.w.Close()
}
// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the
// underlying Reader returns EOF before the limit has been reached.
type spanReader struct {
r io.Reader
n int64
}
func (l *spanReader) Read(p []byte) (n int, err error) {
if l.n <= 0 {
return 0, io.EOF
}
if int64(len(p)) > l.n {
p = p[0:l.n]
}
n, err = l.r.Read(p)
l.n -= int64(n)
if l.n > 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// readHeader parses a packet header and returns an io.Reader which will return
// the contents of the packet. See RFC 4880, section 4.2.
func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
var buf [4]byte
_, err = io.ReadFull(r, buf[:1])
if err != nil {
return
}
if buf[0]&0x80 == 0 {
err = errors.StructuralError("tag byte does not have MSB set")
return
}
if buf[0]&0x40 == 0 {
// Old format packet
tag = packetType((buf[0] & 0x3f) >> 2)
lengthType := buf[0] & 3
if lengthType == 3 {
length = -1
contents = r
return
}
lengthBytes := 1 << lengthType
_, err = readFull(r, buf[0:lengthBytes])
if err != nil {
return
}
for i := 0; i < lengthBytes; i++ {
length <<= 8
length |= int64(buf[i])
}
contents = &spanReader{r, length}
return
}
// New format packet
tag = packetType(buf[0] & 0x3f)
length, isPartial, err := readLength(r)
if err != nil {
return
}
if isPartial {
contents = &partialLengthReader{
remaining: length,
isPartial: true,
r: r,
}
length = -1
} else {
contents = &spanReader{r, length}
}
return
}
// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
// 4.2.
func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
var buf [6]byte
var n int
buf[0] = 0x80 | 0x40 | byte(ptype)
if length < 192 {
buf[1] = byte(length)
n = 2
} else if length < 8384 {
length -= 192
buf[1] = 192 + byte(length>>8)
buf[2] = byte(length)
n = 3
} else {
buf[1] = 255
buf[2] = byte(length >> 24)
buf[3] = byte(length >> 16)
buf[4] = byte(length >> 8)
buf[5] = byte(length)
n = 6
}
_, err = w.Write(buf[:n])
return
}
// serializeStreamHeader writes an OpenPGP packet header to w where the
// length of the packet is unknown. It returns an io.WriteCloser which can be
// used to write the contents of the packet. See RFC 4880, section 4.2.
func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
var buf [1]byte
buf[0] = 0x80 | 0x40 | byte(ptype)
_, err = w.Write(buf[:])
if err != nil {
return
}
out = &partialLengthWriter{w: w}
return
}
// Packet represents an OpenPGP packet. Users are expected to try casting
// instances of this interface to specific packet types.
type Packet interface {
parse(io.Reader) error
}
// consumeAll reads from the given Reader until error, returning the number of
// bytes read.
func consumeAll(r io.Reader) (n int64, err error) {
var m int
var buf [1024]byte
for {
m, err = r.Read(buf[:])
n += int64(m)
if err == io.EOF {
err = nil
return
}
if err != nil {
return
}
}
panic("unreachable")
}
// packetType represents the numeric ids of the different OpenPGP packet types. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
type packetType uint8
const (
packetTypeEncryptedKey packetType = 1
packetTypeSignature packetType = 2
packetTypeSymmetricKeyEncrypted packetType = 3
packetTypeOnePassSignature packetType = 4
packetTypePrivateKey packetType = 5
packetTypePublicKey packetType = 6
packetTypePrivateSubkey packetType = 7
packetTypeCompressed packetType = 8
packetTypeSymmetricallyEncrypted packetType = 9
packetTypeLiteralData packetType = 11
packetTypeUserId packetType = 13
packetTypePublicSubkey packetType = 14
packetTypeUserAttribute packetType = 17
packetTypeSymmetricallyEncryptedMDC packetType = 18
)
// peekVersion detects the version of a public key or signature packet about
// to be read. A bufio.Reader at the original position of the io.Reader
// is returned.
func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) {
bufr = bufio.NewReader(r)
var verBuf []byte
if verBuf, err = bufr.Peek(1); err != nil {
return
}
ver = verBuf[0]
return
}
// Read reads a single OpenPGP packet from the given io.Reader. If there is an
// error parsing a packet, the whole packet is consumed from the input.
func Read(r io.Reader) (p Packet, err error) {
tag, _, contents, err := readHeader(r)
if err != nil {
return
}
switch tag {
case packetTypeEncryptedKey:
p = new(EncryptedKey)
case packetTypeSignature:
var version byte
// Detect signature version
if contents, version, err = peekVersion(contents); err != nil {
return
}
if version < 4 {
p = new(SignatureV3)
} else {
p = new(Signature)
}
case packetTypeSymmetricKeyEncrypted:
p = new(SymmetricKeyEncrypted)
case packetTypeOnePassSignature:
p = new(OnePassSignature)
case packetTypePrivateKey, packetTypePrivateSubkey:
pk := new(PrivateKey)
if tag == packetTypePrivateSubkey {
pk.IsSubkey = true
}
p = pk
case packetTypePublicKey, packetTypePublicSubkey:
var version byte
if contents, version, err = peekVersion(contents); err != nil {
return
}
isSubkey := tag == packetTypePublicSubkey
if version < 4 {
p = &PublicKeyV3{IsSubkey: isSubkey}
} else {
p = &PublicKey{IsSubkey: isSubkey}
}
case packetTypeCompressed:
p = new(Compressed)
case packetTypeSymmetricallyEncrypted:
p = new(SymmetricallyEncrypted)
case packetTypeLiteralData:
p = new(LiteralData)
case packetTypeUserId:
p = new(UserId)
case packetTypeUserAttribute:
p = new(UserAttribute)
case packetTypeSymmetricallyEncryptedMDC:
se := new(SymmetricallyEncrypted)
se.MDC = true
p = se
default:
err = errors.UnknownPacketTypeError(tag)
}
if p != nil {
err = p.parse(contents)
}
if err != nil {
consumeAll(contents)
}
return
}
// SignatureType represents the different semantic meanings of an OpenPGP
// signature. See RFC 4880, section 5.2.1.
type SignatureType uint8
const (
SigTypeBinary SignatureType = 0
SigTypeText = 1
SigTypeGenericCert = 0x10
SigTypePersonaCert = 0x11
SigTypeCasualCert = 0x12
SigTypePositiveCert = 0x13
SigTypeSubkeyBinding = 0x18
SigTypePrimaryKeyBinding = 0x19
SigTypeDirectSignature = 0x1F
SigTypeKeyRevocation = 0x20
SigTypeSubkeyRevocation = 0x28
)
// PublicKeyAlgorithm represents the different public key systems specified for
// OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
type PublicKeyAlgorithm uint8
const (
PubKeyAlgoRSA PublicKeyAlgorithm = 1
PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
PubKeyAlgoElGamal PublicKeyAlgorithm = 16
PubKeyAlgoDSA PublicKeyAlgorithm = 17
// RFC 6637, Section 5.
PubKeyAlgoECDH PublicKeyAlgorithm = 18
PubKeyAlgoECDSA PublicKeyAlgorithm = 19
)
// CanEncrypt returns true if it's possible to encrypt a message to a public
// key of the given type.
func (pka PublicKeyAlgorithm) CanEncrypt() bool {
switch pka {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal:
return true
}
return false
}
// CanSign returns true if it's possible for a public key of the given type to
// sign a message.
func (pka PublicKeyAlgorithm) CanSign() bool {
switch pka {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
return true
}
return false
}
// CipherFunction represents the different block ciphers specified for OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
type CipherFunction uint8
const (
Cipher3DES CipherFunction = 2
CipherCAST5 CipherFunction = 3
CipherAES128 CipherFunction = 7
CipherAES192 CipherFunction = 8
CipherAES256 CipherFunction = 9
)
// KeySize returns the key size, in bytes, of cipher.
func (cipher CipherFunction) KeySize() int {
switch cipher {
case Cipher3DES:
return 24
case CipherCAST5:
return cast5.KeySize
case CipherAES128:
return 16
case CipherAES192:
return 24
case CipherAES256:
return 32
}
return 0
}
// blockSize returns the block size, in bytes, of cipher.
func (cipher CipherFunction) blockSize() int {
switch cipher {
case Cipher3DES:
return des.BlockSize
case CipherCAST5:
return 8
case CipherAES128, CipherAES192, CipherAES256:
return 16
}
return 0
}
// new returns a fresh instance of the given cipher.
func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
switch cipher {
case Cipher3DES:
block, _ = des.NewTripleDESCipher(key)
case CipherCAST5:
block, _ = cast5.NewCipher(key)
case CipherAES128, CipherAES192, CipherAES256:
block, _ = aes.NewCipher(key)
}
return
}
// readMPI reads a big integer from r. The bit length returned is the bit
// length that was specified in r. This is preserved so that the integer can be
// reserialized exactly.
func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
var buf [2]byte
_, err = readFull(r, buf[0:])
if err != nil {
return
}
bitLength = uint16(buf[0])<<8 | uint16(buf[1])
numBytes := (int(bitLength) + 7) / 8
mpi = make([]byte, numBytes)
_, err = readFull(r, mpi)
return
}
// mpiLength returns the length of the given *big.Int when serialized as an
// MPI.
func mpiLength(n *big.Int) (mpiLengthInBytes int) {
mpiLengthInBytes = 2 /* MPI length */
mpiLengthInBytes += (n.BitLen() + 7) / 8
return
}
// writeMPI serializes a big integer to w.
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
if err == nil {
_, err = w.Write(mpiBytes)
}
return
}
// writeBig serializes a *big.Int to w.
func writeBig(w io.Writer, i *big.Int) error {
return writeMPI(w, uint16(i.BitLen()), i.Bytes())
}
// CompressionAlgo represents the different compression algorithms
// supported by OpenPGP (except for BZIP2, which is not currently
// supported). See Section 9.3 of RFC 4880.
type CompressionAlgo uint8
const (
CompressionNone CompressionAlgo = 0
CompressionZIP CompressionAlgo = 1
CompressionZLIB CompressionAlgo = 2
)
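
Usage sketch (countLiterals is illustrative): a typical caller loops over packet.Read and type-switches on the result; streaming bodies such as LiteralData.Body must be drained before the next Read.

package example

import (
	"io"
	"io/ioutil"

	"golang.org/x/crypto/openpgp/packet"
)

// countLiterals counts the literal data packets in an OpenPGP stream.
func countLiterals(r io.Reader) (int, error) {
	n := 0
	for {
		p, err := packet.Read(r)
		if err == io.EOF {
			return n, nil
		}
		if err != nil {
			return n, err
		}
		if lit, ok := p.(*packet.LiteralData); ok {
			// Body streams from r; it must be consumed before the next Read.
			if _, err := io.Copy(ioutil.Discard, lit.Body); err != nil {
				return n, err
			}
			n++
		}
	}
}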

380
vendor/golang.org/x/crypto/openpgp/packet/private_key.go generated vendored Normal file
View File

@ -0,0 +1,380 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"crypto"
"crypto/cipher"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"crypto/sha1"
"io"
"io/ioutil"
"math/big"
"strconv"
"time"
"golang.org/x/crypto/openpgp/elgamal"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/s2k"
)
// PrivateKey represents a possibly encrypted private key. See RFC 4880,
// section 5.5.3.
type PrivateKey struct {
PublicKey
Encrypted bool // if true then the private key is unavailable until Decrypt has been called.
encryptedData []byte
cipher CipherFunction
s2k func(out, in []byte)
PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer.
sha1Checksum bool
iv []byte
}
func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
pk := new(PrivateKey)
pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey)
pk.PrivateKey = priv
return pk
}
// NewSignerPrivateKey creates a sign-only PrivateKey from a crypto.Signer that
// implements RSA or ECDSA.
func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey {
pk := new(PrivateKey)
switch pubkey := signer.Public().(type) {
case rsa.PublicKey:
pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey)
pk.PubKeyAlgo = PubKeyAlgoRSASignOnly
case ecdsa.PublicKey:
pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey)
default:
panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey")
}
pk.PrivateKey = signer
return pk
}
func (pk *PrivateKey) parse(r io.Reader) (err error) {
err = (&pk.PublicKey).parse(r)
if err != nil {
return
}
var buf [1]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
s2kType := buf[0]
switch s2kType {
case 0:
pk.s2k = nil
pk.Encrypted = false
case 254, 255:
_, err = readFull(r, buf[:])
if err != nil {
return
}
pk.cipher = CipherFunction(buf[0])
pk.Encrypted = true
pk.s2k, err = s2k.Parse(r)
if err != nil {
return
}
if s2kType == 254 {
pk.sha1Checksum = true
}
default:
return errors.UnsupportedError("deprecated s2k function in private key")
}
if pk.Encrypted {
blockSize := pk.cipher.blockSize()
if blockSize == 0 {
return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
}
pk.iv = make([]byte, blockSize)
_, err = readFull(r, pk.iv)
if err != nil {
return
}
}
pk.encryptedData, err = ioutil.ReadAll(r)
if err != nil {
return
}
if !pk.Encrypted {
return pk.parsePrivateKey(pk.encryptedData)
}
return
}
func mod64kHash(d []byte) uint16 {
var h uint16
for _, b := range d {
h += uint16(b)
}
return h
}
func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
// TODO(agl): support encrypted private keys
buf := bytes.NewBuffer(nil)
err = pk.PublicKey.serializeWithoutHeaders(buf)
if err != nil {
return
}
buf.WriteByte(0 /* no encryption */)
privateKeyBuf := bytes.NewBuffer(nil)
switch priv := pk.PrivateKey.(type) {
case *rsa.PrivateKey:
err = serializeRSAPrivateKey(privateKeyBuf, priv)
case *dsa.PrivateKey:
err = serializeDSAPrivateKey(privateKeyBuf, priv)
case *elgamal.PrivateKey:
err = serializeElGamalPrivateKey(privateKeyBuf, priv)
case *ecdsa.PrivateKey:
err = serializeECDSAPrivateKey(privateKeyBuf, priv)
default:
err = errors.InvalidArgumentError("unknown private key type")
}
if err != nil {
return
}
ptype := packetTypePrivateKey
contents := buf.Bytes()
privateKeyBytes := privateKeyBuf.Bytes()
if pk.IsSubkey {
ptype = packetTypePrivateSubkey
}
err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2)
if err != nil {
return
}
_, err = w.Write(contents)
if err != nil {
return
}
_, err = w.Write(privateKeyBytes)
if err != nil {
return
}
checksum := mod64kHash(privateKeyBytes)
var checksumBytes [2]byte
checksumBytes[0] = byte(checksum >> 8)
checksumBytes[1] = byte(checksum)
_, err = w.Write(checksumBytes[:])
return
}
func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
err := writeBig(w, priv.D)
if err != nil {
return err
}
err = writeBig(w, priv.Primes[1])
if err != nil {
return err
}
err = writeBig(w, priv.Primes[0])
if err != nil {
return err
}
return writeBig(w, priv.Precomputed.Qinv)
}
func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error {
return writeBig(w, priv.X)
}
func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error {
return writeBig(w, priv.X)
}
func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error {
return writeBig(w, priv.D)
}
// Decrypt decrypts an encrypted private key using a passphrase.
func (pk *PrivateKey) Decrypt(passphrase []byte) error {
if !pk.Encrypted {
return nil
}
key := make([]byte, pk.cipher.KeySize())
pk.s2k(key, passphrase)
block := pk.cipher.new(key)
cfb := cipher.NewCFBDecrypter(block, pk.iv)
data := make([]byte, len(pk.encryptedData))
cfb.XORKeyStream(data, pk.encryptedData)
if pk.sha1Checksum {
if len(data) < sha1.Size {
return errors.StructuralError("truncated private key data")
}
h := sha1.New()
h.Write(data[:len(data)-sha1.Size])
sum := h.Sum(nil)
if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
return errors.StructuralError("private key checksum failure")
}
data = data[:len(data)-sha1.Size]
} else {
if len(data) < 2 {
return errors.StructuralError("truncated private key data")
}
var sum uint16
for i := 0; i < len(data)-2; i++ {
sum += uint16(data[i])
}
if data[len(data)-2] != uint8(sum>>8) ||
data[len(data)-1] != uint8(sum) {
return errors.StructuralError("private key checksum failure")
}
data = data[:len(data)-2]
}
return pk.parsePrivateKey(data)
}
func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
switch pk.PublicKey.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
return pk.parseRSAPrivateKey(data)
case PubKeyAlgoDSA:
return pk.parseDSAPrivateKey(data)
case PubKeyAlgoElGamal:
return pk.parseElGamalPrivateKey(data)
case PubKeyAlgoECDSA:
return pk.parseECDSAPrivateKey(data)
}
panic("impossible")
}
func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
rsaPriv := new(rsa.PrivateKey)
rsaPriv.PublicKey = *rsaPub
buf := bytes.NewBuffer(data)
d, _, err := readMPI(buf)
if err != nil {
return
}
p, _, err := readMPI(buf)
if err != nil {
return
}
q, _, err := readMPI(buf)
if err != nil {
return
}
rsaPriv.D = new(big.Int).SetBytes(d)
rsaPriv.Primes = make([]*big.Int, 2)
rsaPriv.Primes[0] = new(big.Int).SetBytes(p)
rsaPriv.Primes[1] = new(big.Int).SetBytes(q)
if err := rsaPriv.Validate(); err != nil {
return err
}
rsaPriv.Precompute()
pk.PrivateKey = rsaPriv
pk.Encrypted = false
pk.encryptedData = nil
return nil
}
func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
dsaPriv := new(dsa.PrivateKey)
dsaPriv.PublicKey = *dsaPub
buf := bytes.NewBuffer(data)
x, _, err := readMPI(buf)
if err != nil {
return
}
dsaPriv.X = new(big.Int).SetBytes(x)
pk.PrivateKey = dsaPriv
pk.Encrypted = false
pk.encryptedData = nil
return nil
}
func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
priv := new(elgamal.PrivateKey)
priv.PublicKey = *pub
buf := bytes.NewBuffer(data)
x, _, err := readMPI(buf)
if err != nil {
return
}
priv.X = new(big.Int).SetBytes(x)
pk.PrivateKey = priv
pk.Encrypted = false
pk.encryptedData = nil
return nil
}
func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) {
ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey)
buf := bytes.NewBuffer(data)
d, _, err := readMPI(buf)
if err != nil {
return
}
pk.PrivateKey = &ecdsa.PrivateKey{
PublicKey: *ecdsaPub,
D: new(big.Int).SetBytes(d),
}
pk.Encrypted = false
pk.encryptedData = nil
return nil
}
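
Two usage sketches against the PrivateKey API above (helper names are illustrative): wrapping a generated *rsa.PrivateKey for serialization, and unlocking a passphrase-protected key before use.

package example

import (
	"crypto/rsa"
	"io"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

// serializeRSAKey writes rsaPriv (assumed to come from rsa.GenerateKey, so its
// CRT values are precomputed) as an OpenPGP private key packet. Note that
// Serialize currently stores the key material unencrypted (s2k usage byte 0).
func serializeRSAKey(w io.Writer, rsaPriv *rsa.PrivateKey) error {
	pk := packet.NewRSAPrivateKey(time.Now(), rsaPriv)
	return pk.Serialize(w)
}

// unlock decrypts pk in place if it was stored passphrase-protected.
func unlock(pk *packet.PrivateKey, passphrase []byte) error {
	if !pk.Encrypted {
		return nil
	}
	return pk.Decrypt(passphrase)
}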

750
vendor/golang.org/x/crypto/openpgp/packet/public_key.go generated vendored Normal file
View File

@ -0,0 +1,750 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha1"
_ "crypto/sha256"
_ "crypto/sha512"
"encoding/binary"
"fmt"
"hash"
"io"
"math/big"
"strconv"
"time"
"golang.org/x/crypto/openpgp/elgamal"
"golang.org/x/crypto/openpgp/errors"
)
var (
// NIST curve P-256
oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}
// NIST curve P-384
oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22}
// NIST curve P-521
oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23}
)
const maxOIDLength = 8
// ecdsaKey stores the algorithm-specific fields for ECDSA keys,
// as defined in RFC 6637, Section 9.
type ecdsaKey struct {
// oid contains the OID byte sequence identifying the elliptic curve used
oid []byte
// p contains the elliptic curve point that represents the public key
p parsedMPI
}
// parseOID reads the OID for the curve as defined in RFC 6637, Section 9.
func parseOID(r io.Reader) (oid []byte, err error) {
buf := make([]byte, maxOIDLength)
if _, err = readFull(r, buf[:1]); err != nil {
return
}
oidLen := buf[0]
if int(oidLen) > len(buf) {
err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen)))
return
}
oid = buf[:oidLen]
_, err = readFull(r, oid)
return
}
func (f *ecdsaKey) parse(r io.Reader) (err error) {
if f.oid, err = parseOID(r); err != nil {
return err
}
f.p.bytes, f.p.bitLength, err = readMPI(r)
return
}
func (f *ecdsaKey) serialize(w io.Writer) (err error) {
buf := make([]byte, maxOIDLength+1)
buf[0] = byte(len(f.oid))
copy(buf[1:], f.oid)
if _, err = w.Write(buf[:len(f.oid)+1]); err != nil {
return
}
return writeMPIs(w, f.p)
}
func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) {
var c elliptic.Curve
if bytes.Equal(f.oid, oidCurveP256) {
c = elliptic.P256()
} else if bytes.Equal(f.oid, oidCurveP384) {
c = elliptic.P384()
} else if bytes.Equal(f.oid, oidCurveP521) {
c = elliptic.P521()
} else {
return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid))
}
x, y := elliptic.Unmarshal(c, f.p.bytes)
if x == nil {
return nil, errors.UnsupportedError("failed to parse EC point")
}
return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil
}
func (f *ecdsaKey) byteLen() int {
return 1 + len(f.oid) + 2 + len(f.p.bytes)
}
type kdfHashFunction byte
type kdfAlgorithm byte
// ecdhKdf stores key derivation function parameters
// used for ECDH encryption. See RFC 6637, Section 9.
type ecdhKdf struct {
KdfHash kdfHashFunction
KdfAlgo kdfAlgorithm
}
func (f *ecdhKdf) parse(r io.Reader) (err error) {
buf := make([]byte, 1)
if _, err = readFull(r, buf); err != nil {
return
}
kdfLen := int(buf[0])
if kdfLen < 3 {
return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen))
}
buf = make([]byte, kdfLen)
if _, err = readFull(r, buf); err != nil {
return
}
reserved := int(buf[0])
f.KdfHash = kdfHashFunction(buf[1])
f.KdfAlgo = kdfAlgorithm(buf[2])
if reserved != 0x01 {
return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved))
}
return
}
func (f *ecdhKdf) serialize(w io.Writer) (err error) {
buf := make([]byte, 4)
// See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys.
buf[0] = byte(0x03) // Length of the following fields
buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now
buf[2] = byte(f.KdfHash)
buf[3] = byte(f.KdfAlgo)
_, err = w.Write(buf[:])
return
}
func (f *ecdhKdf) byteLen() int {
return 4
}
// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2.
type PublicKey struct {
CreationTime time.Time
PubKeyAlgo PublicKeyAlgorithm
PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey
Fingerprint [20]byte
KeyId uint64
IsSubkey bool
n, e, p, q, g, y parsedMPI
// RFC 6637 fields
ec *ecdsaKey
ecdh *ecdhKdf
}
// signingKey provides a convenient abstraction over signature verification
// for v3 and v4 public keys.
type signingKey interface {
SerializeSignaturePrefix(io.Writer)
serializeWithoutHeaders(io.Writer) error
}
func fromBig(n *big.Int) parsedMPI {
return parsedMPI{
bytes: n.Bytes(),
bitLength: uint16(n.BitLen()),
}
}
// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey.
func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey {
pk := &PublicKey{
CreationTime: creationTime,
PubKeyAlgo: PubKeyAlgoRSA,
PublicKey: pub,
n: fromBig(pub.N),
e: fromBig(big.NewInt(int64(pub.E))),
}
pk.setFingerPrintAndKeyId()
return pk
}
// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey.
func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey {
pk := &PublicKey{
CreationTime: creationTime,
PubKeyAlgo: PubKeyAlgoDSA,
PublicKey: pub,
p: fromBig(pub.P),
q: fromBig(pub.Q),
g: fromBig(pub.G),
y: fromBig(pub.Y),
}
pk.setFingerPrintAndKeyId()
return pk
}
// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey.
func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey {
pk := &PublicKey{
CreationTime: creationTime,
PubKeyAlgo: PubKeyAlgoElGamal,
PublicKey: pub,
p: fromBig(pub.P),
g: fromBig(pub.G),
y: fromBig(pub.Y),
}
pk.setFingerPrintAndKeyId()
return pk
}
func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey {
pk := &PublicKey{
CreationTime: creationTime,
PubKeyAlgo: PubKeyAlgoECDSA,
PublicKey: pub,
ec: new(ecdsaKey),
}
switch pub.Curve {
case elliptic.P256():
pk.ec.oid = oidCurveP256
case elliptic.P384():
pk.ec.oid = oidCurveP384
case elliptic.P521():
pk.ec.oid = oidCurveP521
default:
panic("unknown elliptic curve")
}
pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
pk.ec.p.bitLength = uint16(8 * len(pk.ec.p.bytes))
pk.setFingerPrintAndKeyId()
return pk
}
func (pk *PublicKey) parse(r io.Reader) (err error) {
// RFC 4880, section 5.5.2
var buf [6]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
if buf[0] != 4 {
return errors.UnsupportedError("public key version")
}
pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
err = pk.parseRSA(r)
case PubKeyAlgoDSA:
err = pk.parseDSA(r)
case PubKeyAlgoElGamal:
err = pk.parseElGamal(r)
case PubKeyAlgoECDSA:
pk.ec = new(ecdsaKey)
if err = pk.ec.parse(r); err != nil {
return err
}
pk.PublicKey, err = pk.ec.newECDSA()
case PubKeyAlgoECDH:
pk.ec = new(ecdsaKey)
if err = pk.ec.parse(r); err != nil {
return
}
pk.ecdh = new(ecdhKdf)
if err = pk.ecdh.parse(r); err != nil {
return
}
// The ECDH key is stored in an ecdsa.PublicKey for convenience.
pk.PublicKey, err = pk.ec.newECDSA()
default:
err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
}
if err != nil {
return
}
pk.setFingerPrintAndKeyId()
return
}
func (pk *PublicKey) setFingerPrintAndKeyId() {
// RFC 4880, section 12.2
fingerPrint := sha1.New()
pk.SerializeSignaturePrefix(fingerPrint)
pk.serializeWithoutHeaders(fingerPrint)
copy(pk.Fingerprint[:], fingerPrint.Sum(nil))
pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
}
// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
// section 5.5.2.
func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
pk.n.bytes, pk.n.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.e.bytes, pk.e.bitLength, err = readMPI(r)
if err != nil {
return
}
if len(pk.e.bytes) > 3 {
err = errors.UnsupportedError("large public exponent")
return
}
rsa := &rsa.PublicKey{
N: new(big.Int).SetBytes(pk.n.bytes),
E: 0,
}
for i := 0; i < len(pk.e.bytes); i++ {
rsa.E <<= 8
rsa.E |= int(pk.e.bytes[i])
}
pk.PublicKey = rsa
return
}
// parseDSA parses DSA public key material from the given Reader. See RFC 4880,
// section 5.5.2.
func (pk *PublicKey) parseDSA(r io.Reader) (err error) {
pk.p.bytes, pk.p.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.q.bytes, pk.q.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.g.bytes, pk.g.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.y.bytes, pk.y.bitLength, err = readMPI(r)
if err != nil {
return
}
dsa := new(dsa.PublicKey)
dsa.P = new(big.Int).SetBytes(pk.p.bytes)
dsa.Q = new(big.Int).SetBytes(pk.q.bytes)
dsa.G = new(big.Int).SetBytes(pk.g.bytes)
dsa.Y = new(big.Int).SetBytes(pk.y.bytes)
pk.PublicKey = dsa
return
}
// parseElGamal parses ElGamal public key material from the given Reader. See
// RFC 4880, section 5.5.2.
func (pk *PublicKey) parseElGamal(r io.Reader) (err error) {
pk.p.bytes, pk.p.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.g.bytes, pk.g.bitLength, err = readMPI(r)
if err != nil {
return
}
pk.y.bytes, pk.y.bitLength, err = readMPI(r)
if err != nil {
return
}
elgamal := new(elgamal.PublicKey)
elgamal.P = new(big.Int).SetBytes(pk.p.bytes)
elgamal.G = new(big.Int).SetBytes(pk.g.bytes)
elgamal.Y = new(big.Int).SetBytes(pk.y.bytes)
pk.PublicKey = elgamal
return
}
// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
// The prefix is used when calculating a signature over this public key. See
// RFC 4880, section 5.2.4.
func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) {
var pLength uint16
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
pLength += 2 + uint16(len(pk.n.bytes))
pLength += 2 + uint16(len(pk.e.bytes))
case PubKeyAlgoDSA:
pLength += 2 + uint16(len(pk.p.bytes))
pLength += 2 + uint16(len(pk.q.bytes))
pLength += 2 + uint16(len(pk.g.bytes))
pLength += 2 + uint16(len(pk.y.bytes))
case PubKeyAlgoElGamal:
pLength += 2 + uint16(len(pk.p.bytes))
pLength += 2 + uint16(len(pk.g.bytes))
pLength += 2 + uint16(len(pk.y.bytes))
case PubKeyAlgoECDSA:
pLength += uint16(pk.ec.byteLen())
case PubKeyAlgoECDH:
pLength += uint16(pk.ec.byteLen())
pLength += uint16(pk.ecdh.byteLen())
default:
panic("unknown public key algorithm")
}
pLength += 6
h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
return
}
func (pk *PublicKey) Serialize(w io.Writer) (err error) {
length := 6 // 6 byte header
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
length += 2 + len(pk.n.bytes)
length += 2 + len(pk.e.bytes)
case PubKeyAlgoDSA:
length += 2 + len(pk.p.bytes)
length += 2 + len(pk.q.bytes)
length += 2 + len(pk.g.bytes)
length += 2 + len(pk.y.bytes)
case PubKeyAlgoElGamal:
length += 2 + len(pk.p.bytes)
length += 2 + len(pk.g.bytes)
length += 2 + len(pk.y.bytes)
case PubKeyAlgoECDSA:
length += pk.ec.byteLen()
case PubKeyAlgoECDH:
length += pk.ec.byteLen()
length += pk.ecdh.byteLen()
default:
panic("unknown public key algorithm")
}
packetType := packetTypePublicKey
if pk.IsSubkey {
packetType = packetTypePublicSubkey
}
err = serializeHeader(w, packetType, length)
if err != nil {
return
}
return pk.serializeWithoutHeaders(w)
}
// serializeWithoutHeaders marshals the PublicKey to w in the form of an
// OpenPGP public key packet, not including the packet header.
func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
var buf [6]byte
buf[0] = 4
t := uint32(pk.CreationTime.Unix())
buf[1] = byte(t >> 24)
buf[2] = byte(t >> 16)
buf[3] = byte(t >> 8)
buf[4] = byte(t)
buf[5] = byte(pk.PubKeyAlgo)
_, err = w.Write(buf[:])
if err != nil {
return
}
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
return writeMPIs(w, pk.n, pk.e)
case PubKeyAlgoDSA:
return writeMPIs(w, pk.p, pk.q, pk.g, pk.y)
case PubKeyAlgoElGamal:
return writeMPIs(w, pk.p, pk.g, pk.y)
case PubKeyAlgoECDSA:
return pk.ec.serialize(w)
case PubKeyAlgoECDH:
if err = pk.ec.serialize(w); err != nil {
return
}
return pk.ecdh.serialize(w)
}
return errors.InvalidArgumentError("bad public-key algorithm")
}
// CanSign returns true iff this public key can generate signatures
func (pk *PublicKey) CanSign() bool {
return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal
}
// VerifySignature returns nil iff sig is a valid signature, made by this
// public key, of the data hashed into signed. signed is mutated by this call.
func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
if !pk.CanSign() {
return errors.InvalidArgumentError("public key cannot generate signatures")
}
signed.Write(sig.HashSuffix)
hashBytes := signed.Sum(nil)
if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
return errors.SignatureError("hash tag doesn't match")
}
if pk.PubKeyAlgo != sig.PubKeyAlgo {
return errors.InvalidArgumentError("public key and signature use different algorithms")
}
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes)
if err != nil {
return errors.SignatureError("RSA verification failure")
}
return nil
case PubKeyAlgoDSA:
dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)
// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
if len(hashBytes) > subgroupSize {
hashBytes = hashBytes[:subgroupSize]
}
if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
return errors.SignatureError("DSA verification failure")
}
return nil
case PubKeyAlgoECDSA:
ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)
if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) {
return errors.SignatureError("ECDSA verification failure")
}
return nil
default:
return errors.SignatureError("Unsupported public key algorithm used in signature")
}
panic("unreachable")
}
// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
// public key, of the data hashed into signed. signed is mutated by this call.
func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
if !pk.CanSign() {
return errors.InvalidArgumentError("public key cannot generate signatures")
}
suffix := make([]byte, 5)
suffix[0] = byte(sig.SigType)
binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
signed.Write(suffix)
hashBytes := signed.Sum(nil)
if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
return errors.SignatureError("hash tag doesn't match")
}
if pk.PubKeyAlgo != sig.PubKeyAlgo {
return errors.InvalidArgumentError("public key and signature use different algorithms")
}
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
rsaPublicKey := pk.PublicKey.(*rsa.PublicKey)
if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
return errors.SignatureError("RSA verification failure")
}
return
case PubKeyAlgoDSA:
dsaPublicKey := pk.PublicKey.(*dsa.PublicKey)
// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
if len(hashBytes) > subgroupSize {
hashBytes = hashBytes[:subgroupSize]
}
if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
return errors.SignatureError("DSA verification failure")
}
return nil
default:
panic("shouldn't happen")
}
panic("unreachable")
}
// keySignatureHash returns a Hash of the message that needs to be signed for
// pk to assert a subkey relationship to signed.
func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
if !hashFunc.Available() {
return nil, errors.UnsupportedError("hash function")
}
h = hashFunc.New()
// RFC 4880, section 5.2.4
pk.SerializeSignaturePrefix(h)
pk.serializeWithoutHeaders(h)
signed.SerializeSignaturePrefix(h)
signed.serializeWithoutHeaders(h)
return
}
// VerifyKeySignature returns nil iff sig is a valid signature, made by this
// public key, of signed.
func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error {
h, err := keySignatureHash(pk, signed, sig.Hash)
if err != nil {
return err
}
if err = pk.VerifySignature(h, sig); err != nil {
return err
}
if sig.FlagSign {
// Signing subkeys must be cross-signed. See
// https://www.gnupg.org/faq/subkey-cross-certify.html.
if sig.EmbeddedSignature == nil {
return errors.StructuralError("signing subkey is missing cross-signature")
}
// Verify the cross-signature. This is calculated over the same
// data as the main signature, so we cannot just recursively
// call signed.VerifyKeySignature(...)
if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil {
return errors.StructuralError("error while hashing for cross-signature: " + err.Error())
}
if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil {
return errors.StructuralError("error while verifying cross-signature: " + err.Error())
}
}
return nil
}
func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
if !hashFunc.Available() {
return nil, errors.UnsupportedError("hash function")
}
h = hashFunc.New()
// RFC 4880, section 5.2.4
pk.SerializeSignaturePrefix(h)
pk.serializeWithoutHeaders(h)
return
}
// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this
// public key.
func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
h, err := keyRevocationHash(pk, sig.Hash)
if err != nil {
return err
}
return pk.VerifySignature(h, sig)
}
// userIdSignatureHash returns a Hash of the message that needs to be signed
// to assert that pk is a valid key for id.
func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
if !hashFunc.Available() {
return nil, errors.UnsupportedError("hash function")
}
h = hashFunc.New()
// RFC 4880, section 5.2.4
pk.SerializeSignaturePrefix(h)
pk.serializeWithoutHeaders(h)
var buf [5]byte
buf[0] = 0xb4
buf[1] = byte(len(id) >> 24)
buf[2] = byte(len(id) >> 16)
buf[3] = byte(len(id) >> 8)
buf[4] = byte(len(id))
h.Write(buf[:])
h.Write([]byte(id))
return
}
// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
// public key, that id is the identity of pub.
func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
h, err := userIdSignatureHash(id, pub, sig.Hash)
if err != nil {
return err
}
return pk.VerifySignature(h, sig)
}
// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
// public key, that id is the identity of pub.
func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) {
h, err := userIdSignatureV3Hash(id, pub, sig.Hash)
if err != nil {
return err
}
return pk.VerifySignatureV3(h, sig)
}
// KeyIdString returns the public key's key ID (the last 8 bytes of its
// fingerprint) in capital hex (e.g. "6C7EE1B8621CC013").
func (pk *PublicKey) KeyIdString() string {
return fmt.Sprintf("%X", pk.Fingerprint[12:20])
}
// KeyIdShortString returns the short form of the public key's key ID
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
func (pk *PublicKey) KeyIdShortString() string {
return fmt.Sprintf("%X", pk.Fingerprint[16:20])
}
// A parsedMPI is used to store the contents of a big integer, along with the
// bit length that was specified in the original input. This allows the MPI to
// be reserialized exactly.
type parsedMPI struct {
bytes []byte
bitLength uint16
}
// writeMPIs is a utility function for serializing several big integers to the
// given Writer.
func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) {
for _, mpi := range mpis {
err = writeMPI(w, mpi.bitLength, mpi.bytes)
if err != nil {
return
}
}
return
}
// BitLength returns the bit length for the given public key.
func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
bitLength = pk.n.bitLength
case PubKeyAlgoDSA:
bitLength = pk.p.bitLength
case PubKeyAlgoElGamal:
bitLength = pk.p.bitLength
default:
err = errors.InvalidArgumentError("bad public-key algorithm")
}
return
}

View File

@ -0,0 +1,280 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"crypto/md5"
"crypto/rsa"
"encoding/binary"
"fmt"
"hash"
"io"
"math/big"
"strconv"
"time"
"golang.org/x/crypto/openpgp/errors"
)
// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and
// should not be used for signing or encrypting. They are supported here only for
// parsing version 3 key material and validating signatures.
// See RFC 4880, section 5.5.2.
type PublicKeyV3 struct {
CreationTime time.Time
DaysToExpire uint16
PubKeyAlgo PublicKeyAlgorithm
PublicKey *rsa.PublicKey
Fingerprint [16]byte
KeyId uint64
IsSubkey bool
n, e parsedMPI
}
// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey.
// Included here for testing purposes only. RFC 4880, section 5.5.2:
// "an implementation MUST NOT generate a V3 key, but MAY accept it."
func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 {
pk := &PublicKeyV3{
CreationTime: creationTime,
PublicKey: pub,
n: fromBig(pub.N),
e: fromBig(big.NewInt(int64(pub.E))),
}
pk.setFingerPrintAndKeyId()
return pk
}
func (pk *PublicKeyV3) parse(r io.Reader) (err error) {
// RFC 4880, section 5.5.2
var buf [8]byte
if _, err = readFull(r, buf[:]); err != nil {
return
}
if buf[0] < 2 || buf[0] > 3 {
return errors.UnsupportedError("public key version")
}
pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7])
pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7])
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
err = pk.parseRSA(r)
default:
err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
}
if err != nil {
return
}
pk.setFingerPrintAndKeyId()
return
}
func (pk *PublicKeyV3) setFingerPrintAndKeyId() {
// RFC 4880, section 12.2
fingerPrint := md5.New()
fingerPrint.Write(pk.n.bytes)
fingerPrint.Write(pk.e.bytes)
fingerPrint.Sum(pk.Fingerprint[:0])
pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:])
}
// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
// section 5.5.2.
func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) {
if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil {
return
}
if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil {
return
}
// RFC 4880 Section 12.2 requires the low 8 bytes of the
// modulus to form the key id.
if len(pk.n.bytes) < 8 {
return errors.StructuralError("v3 public key modulus is too short")
}
if len(pk.e.bytes) > 3 {
err = errors.UnsupportedError("large public exponent")
return
}
rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)}
for i := 0; i < len(pk.e.bytes); i++ {
rsa.E <<= 8
rsa.E |= int(pk.e.bytes[i])
}
pk.PublicKey = rsa
return
}
// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
// The prefix is used when calculating a signature over this public key. See
// RFC 4880, section 5.2.4.
func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) {
var pLength uint16
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
pLength += 2 + uint16(len(pk.n.bytes))
pLength += 2 + uint16(len(pk.e.bytes))
default:
panic("unknown public key algorithm")
}
pLength += 6
w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
return
}
func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) {
length := 8 // 8 byte header
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
length += 2 + len(pk.n.bytes)
length += 2 + len(pk.e.bytes)
default:
panic("unknown public key algorithm")
}
packetType := packetTypePublicKey
if pk.IsSubkey {
packetType = packetTypePublicSubkey
}
if err = serializeHeader(w, packetType, length); err != nil {
return
}
return pk.serializeWithoutHeaders(w)
}
// serializeWithoutHeaders marshals the PublicKey to w in the form of an
// OpenPGP public key packet, not including the packet header.
func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) {
var buf [8]byte
// Version 3
buf[0] = 3
// Creation time
t := uint32(pk.CreationTime.Unix())
buf[1] = byte(t >> 24)
buf[2] = byte(t >> 16)
buf[3] = byte(t >> 8)
buf[4] = byte(t)
// Days to expire
buf[5] = byte(pk.DaysToExpire >> 8)
buf[6] = byte(pk.DaysToExpire)
// Public key algorithm
buf[7] = byte(pk.PubKeyAlgo)
if _, err = w.Write(buf[:]); err != nil {
return
}
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
return writeMPIs(w, pk.n, pk.e)
}
return errors.InvalidArgumentError("bad public-key algorithm")
}
// CanSign returns true iff this public key can generate signatures
func (pk *PublicKeyV3) CanSign() bool {
return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly
}
// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
// public key, of the data hashed into signed. signed is mutated by this call.
func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
if !pk.CanSign() {
return errors.InvalidArgumentError("public key cannot generate signatures")
}
suffix := make([]byte, 5)
suffix[0] = byte(sig.SigType)
binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
signed.Write(suffix)
hashBytes := signed.Sum(nil)
if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
return errors.SignatureError("hash tag doesn't match")
}
if pk.PubKeyAlgo != sig.PubKeyAlgo {
return errors.InvalidArgumentError("public key and signature use different algorithms")
}
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
return errors.SignatureError("RSA verification failure")
}
return
default:
// V3 public keys only support RSA.
panic("shouldn't happen")
}
panic("unreachable")
}
// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
// public key, that id is the identity of pub.
func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) {
h, err := userIdSignatureV3Hash(id, pk, sig.Hash)
if err != nil {
return err
}
return pk.VerifySignatureV3(h, sig)
}
// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this
// public key, of signed.
func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) {
h, err := keySignatureHash(pk, signed, sig.Hash)
if err != nil {
return err
}
return pk.VerifySignatureV3(h, sig)
}
// userIdSignatureV3Hash returns a Hash of the message that needs to be signed
// to assert that pk is a valid key for id.
func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) {
if !hfn.Available() {
return nil, errors.UnsupportedError("hash function")
}
h = hfn.New()
// RFC 4880, section 5.2.4
pk.SerializeSignaturePrefix(h)
pk.serializeWithoutHeaders(h)
h.Write([]byte(id))
return
}
// KeyIdString returns the public key's key ID in capital hex
// (e.g. "6C7EE1B8621CC013").
func (pk *PublicKeyV3) KeyIdString() string {
return fmt.Sprintf("%X", pk.KeyId)
}
// KeyIdShortString returns the short form of the public key's key ID
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
func (pk *PublicKeyV3) KeyIdShortString() string {
return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF)
}
// BitLength returns the bit length for the given public key.
func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) {
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
bitLength = pk.n.bitLength
default:
err = errors.InvalidArgumentError("bad public-key algorithm")
}
return
}

76
vendor/golang.org/x/crypto/openpgp/packet/reader.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"golang.org/x/crypto/openpgp/errors"
"io"
)
// Reader reads packets from an io.Reader and allows packets to be 'unread' so
// that they result from the next call to Next.
type Reader struct {
q []Packet
readers []io.Reader
}
// New io.Readers are pushed when a compressed or encrypted packet is processed
// and recursively treated as a new source of packets. However, a carefully
// crafted packet can trigger an infinite recursive sequence of packets. See
// http://mumble.net/~campbell/misc/pgp-quine
// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402
// This constant limits the number of recursive packets that may be pushed.
const maxReaders = 32
// Next returns the most recently unread Packet, or reads another packet from
// the top-most io.Reader. Unknown packet types are skipped.
func (r *Reader) Next() (p Packet, err error) {
if len(r.q) > 0 {
p = r.q[len(r.q)-1]
r.q = r.q[:len(r.q)-1]
return
}
for len(r.readers) > 0 {
p, err = Read(r.readers[len(r.readers)-1])
if err == nil {
return
}
if err == io.EOF {
r.readers = r.readers[:len(r.readers)-1]
continue
}
if _, ok := err.(errors.UnknownPacketTypeError); !ok {
return nil, err
}
}
return nil, io.EOF
}
// Push causes the Reader to start reading from a new io.Reader. When an EOF
// error is seen from the new io.Reader, it is popped and the Reader continues
// to read from the next most recent io.Reader. Push returns a StructuralError
// if pushing the reader would exceed the maximum recursion level, otherwise it
// returns nil.
func (r *Reader) Push(reader io.Reader) (err error) {
if len(r.readers) >= maxReaders {
return errors.StructuralError("too many layers of packets")
}
r.readers = append(r.readers, reader)
return nil
}
// Unread causes the given Packet to be returned from the next call to Next.
func (r *Reader) Unread(p Packet) {
r.q = append(r.q, p)
}
func NewReader(r io.Reader) *Reader {
return &Reader{
q: nil,
readers: []io.Reader{r},
}
}
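Editor's note (illustrative sketch, not part of the vendored file): the comments above describe the intended use of Reader; a minimal consumer loops over Next until io.EOF, relying on Reader to skip unknown packet types and to pop nested readers pushed for compressed or encrypted content.
func readAllPacketsSketch(r io.Reader) ([]Packet, error) {
	var out []Packet
	pr := NewReader(r)
	for {
		p, err := pr.Next()
		if err == io.EOF {
			return out, nil // all pushed readers exhausted
		}
		if err != nil {
			return nil, err
		}
		out = append(out, p)
	}
}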

731
vendor/golang.org/x/crypto/openpgp/packet/signature.go generated vendored Normal file
View File

@ -0,0 +1,731 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"encoding/asn1"
"encoding/binary"
"hash"
"io"
"math/big"
"strconv"
"time"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/s2k"
)
const (
// See RFC 4880, section 5.2.3.21 for details.
KeyFlagCertify = 1 << iota
KeyFlagSign
KeyFlagEncryptCommunications
KeyFlagEncryptStorage
)
// Signature represents a signature. See RFC 4880, section 5.2.
type Signature struct {
SigType SignatureType
PubKeyAlgo PublicKeyAlgorithm
Hash crypto.Hash
// HashSuffix is extra data that is hashed in after the signed data.
HashSuffix []byte
// HashTag contains the first two bytes of the hash for fast rejection
// of bad signed data.
HashTag [2]byte
CreationTime time.Time
RSASignature parsedMPI
DSASigR, DSASigS parsedMPI
ECDSASigR, ECDSASigS parsedMPI
// rawSubpackets contains the unparsed subpackets, in order.
rawSubpackets []outputSubpacket
// The following are optional and so are nil when not included in the
// signature.
SigLifetimeSecs, KeyLifetimeSecs *uint32
PreferredSymmetric, PreferredHash, PreferredCompression []uint8
IssuerKeyId *uint64
IsPrimaryId *bool
// FlagsValid is set if any flags were given. See RFC 4880, section
// 5.2.3.21 for details.
FlagsValid bool
FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool
// RevocationReason is set if this signature has been revoked.
// See RFC 4880, section 5.2.3.23 for details.
RevocationReason *uint8
RevocationReasonText string
// MDC is set if this signature has a feature packet that indicates
// support for MDC subpackets.
MDC bool
// EmbeddedSignature, if non-nil, is a signature of the parent key, by
// this key. This prevents an attacker from claiming another's signing
// subkey as their own.
EmbeddedSignature *Signature
outSubpackets []outputSubpacket
}
func (sig *Signature) parse(r io.Reader) (err error) {
// RFC 4880, section 5.2.3
var buf [5]byte
_, err = readFull(r, buf[:1])
if err != nil {
return
}
if buf[0] != 4 {
err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
return
}
_, err = readFull(r, buf[:5])
if err != nil {
return
}
sig.SigType = SignatureType(buf[0])
sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
default:
err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
return
}
var ok bool
sig.Hash, ok = s2k.HashIdToHash(buf[2])
if !ok {
return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
}
hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
l := 6 + hashedSubpacketsLength
sig.HashSuffix = make([]byte, l+6)
sig.HashSuffix[0] = 4
copy(sig.HashSuffix[1:], buf[:5])
hashedSubpackets := sig.HashSuffix[6:l]
_, err = readFull(r, hashedSubpackets)
if err != nil {
return
}
// See RFC 4880, section 5.2.4
trailer := sig.HashSuffix[l:]
trailer[0] = 4
trailer[1] = 0xff
trailer[2] = uint8(l >> 24)
trailer[3] = uint8(l >> 16)
trailer[4] = uint8(l >> 8)
trailer[5] = uint8(l)
err = parseSignatureSubpackets(sig, hashedSubpackets, true)
if err != nil {
return
}
_, err = readFull(r, buf[:2])
if err != nil {
return
}
unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1])
unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
_, err = readFull(r, unhashedSubpackets)
if err != nil {
return
}
err = parseSignatureSubpackets(sig, unhashedSubpackets, false)
if err != nil {
return
}
_, err = readFull(r, sig.HashTag[:2])
if err != nil {
return
}
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
case PubKeyAlgoDSA:
sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r)
if err == nil {
sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
}
case PubKeyAlgoECDSA:
sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r)
if err == nil {
sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r)
}
default:
panic("unreachable")
}
return
}
// parseSignatureSubpackets parses subpackets of the main signature packet. See
// RFC 4880, section 5.2.3.1.
func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
for len(subpackets) > 0 {
subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
if err != nil {
return
}
}
if sig.CreationTime.IsZero() {
err = errors.StructuralError("no creation time in signature")
}
return
}
type signatureSubpacketType uint8
const (
creationTimeSubpacket signatureSubpacketType = 2
signatureExpirationSubpacket signatureSubpacketType = 3
keyExpirationSubpacket signatureSubpacketType = 9
prefSymmetricAlgosSubpacket signatureSubpacketType = 11
issuerSubpacket signatureSubpacketType = 16
prefHashAlgosSubpacket signatureSubpacketType = 21
prefCompressionSubpacket signatureSubpacketType = 22
primaryUserIdSubpacket signatureSubpacketType = 25
keyFlagsSubpacket signatureSubpacketType = 27
reasonForRevocationSubpacket signatureSubpacketType = 29
featuresSubpacket signatureSubpacketType = 30
embeddedSignatureSubpacket signatureSubpacketType = 32
)
// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
// RFC 4880, section 5.2.3.1
var (
length uint32
packetType signatureSubpacketType
isCritical bool
)
switch {
case subpacket[0] < 192:
length = uint32(subpacket[0])
subpacket = subpacket[1:]
case subpacket[0] < 255:
if len(subpacket) < 2 {
goto Truncated
}
length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192
subpacket = subpacket[2:]
default:
if len(subpacket) < 5 {
goto Truncated
}
length = uint32(subpacket[1])<<24 |
uint32(subpacket[2])<<16 |
uint32(subpacket[3])<<8 |
uint32(subpacket[4])
subpacket = subpacket[5:]
}
if length > uint32(len(subpacket)) {
goto Truncated
}
rest = subpacket[length:]
subpacket = subpacket[:length]
if len(subpacket) == 0 {
err = errors.StructuralError("zero length signature subpacket")
return
}
packetType = signatureSubpacketType(subpacket[0] & 0x7f)
isCritical = subpacket[0]&0x80 == 0x80
subpacket = subpacket[1:]
sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket})
switch packetType {
case creationTimeSubpacket:
if !isHashed {
err = errors.StructuralError("signature creation time in non-hashed area")
return
}
if len(subpacket) != 4 {
err = errors.StructuralError("signature creation time not four bytes")
return
}
t := binary.BigEndian.Uint32(subpacket)
sig.CreationTime = time.Unix(int64(t), 0)
case signatureExpirationSubpacket:
// Signature expiration time, section 5.2.3.10
if !isHashed {
return
}
if len(subpacket) != 4 {
err = errors.StructuralError("expiration subpacket with bad length")
return
}
sig.SigLifetimeSecs = new(uint32)
*sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
case keyExpirationSubpacket:
// Key expiration time, section 5.2.3.6
if !isHashed {
return
}
if len(subpacket) != 4 {
err = errors.StructuralError("key expiration subpacket with bad length")
return
}
sig.KeyLifetimeSecs = new(uint32)
*sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
case prefSymmetricAlgosSubpacket:
// Preferred symmetric algorithms, section 5.2.3.7
if !isHashed {
return
}
sig.PreferredSymmetric = make([]byte, len(subpacket))
copy(sig.PreferredSymmetric, subpacket)
case issuerSubpacket:
// Issuer, section 5.2.3.5
if len(subpacket) != 8 {
err = errors.StructuralError("issuer subpacket with bad length")
return
}
sig.IssuerKeyId = new(uint64)
*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
case prefHashAlgosSubpacket:
// Preferred hash algorithms, section 5.2.3.8
if !isHashed {
return
}
sig.PreferredHash = make([]byte, len(subpacket))
copy(sig.PreferredHash, subpacket)
case prefCompressionSubpacket:
// Preferred compression algorithms, section 5.2.3.9
if !isHashed {
return
}
sig.PreferredCompression = make([]byte, len(subpacket))
copy(sig.PreferredCompression, subpacket)
case primaryUserIdSubpacket:
// Primary User ID, section 5.2.3.19
if !isHashed {
return
}
if len(subpacket) != 1 {
err = errors.StructuralError("primary user id subpacket with bad length")
return
}
sig.IsPrimaryId = new(bool)
if subpacket[0] > 0 {
*sig.IsPrimaryId = true
}
case keyFlagsSubpacket:
// Key flags, section 5.2.3.21
if !isHashed {
return
}
if len(subpacket) == 0 {
err = errors.StructuralError("empty key flags subpacket")
return
}
sig.FlagsValid = true
if subpacket[0]&KeyFlagCertify != 0 {
sig.FlagCertify = true
}
if subpacket[0]&KeyFlagSign != 0 {
sig.FlagSign = true
}
if subpacket[0]&KeyFlagEncryptCommunications != 0 {
sig.FlagEncryptCommunications = true
}
if subpacket[0]&KeyFlagEncryptStorage != 0 {
sig.FlagEncryptStorage = true
}
case reasonForRevocationSubpacket:
// Reason For Revocation, section 5.2.3.23
if !isHashed {
return
}
if len(subpacket) == 0 {
err = errors.StructuralError("empty revocation reason subpacket")
return
}
sig.RevocationReason = new(uint8)
*sig.RevocationReason = subpacket[0]
sig.RevocationReasonText = string(subpacket[1:])
case featuresSubpacket:
// Features subpacket, section 5.2.3.24 specifies a very general
// mechanism for OpenPGP implementations to signal support for new
// features. In practice, the subpacket is used exclusively to
// indicate support for MDC-protected encryption.
sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1
case embeddedSignatureSubpacket:
// Only usage is in signatures that cross-certify
// signing subkeys. section 5.2.3.26 describes the
// format, with its usage described in section 11.1
if sig.EmbeddedSignature != nil {
err = errors.StructuralError("Cannot have multiple embedded signatures")
return
}
sig.EmbeddedSignature = new(Signature)
// Embedded signatures are required to be v4 signatures see
// section 12.1. However, we only parse v4 signatures in this
// file anyway.
if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
return nil, err
}
if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
}
default:
if isCritical {
err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
return
}
}
return
Truncated:
err = errors.StructuralError("signature subpacket truncated")
return
}
// subpacketLengthLength returns the length, in bytes, of an encoded length value.
func subpacketLengthLength(length int) int {
if length < 192 {
return 1
}
if length < 16320 {
return 2
}
return 5
}
// serializeSubpacketLength marshals the given length into to.
func serializeSubpacketLength(to []byte, length int) int {
// RFC 4880, Section 4.2.2.
if length < 192 {
to[0] = byte(length)
return 1
}
if length < 16320 {
length -= 192
to[0] = byte((length >> 8) + 192)
to[1] = byte(length)
return 2
}
to[0] = 255
to[1] = byte(length >> 24)
to[2] = byte(length >> 16)
to[3] = byte(length >> 8)
to[4] = byte(length)
return 5
}
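Editor's note (worked example, not part of the vendored file): the three length encodings of RFC 4880, section 4.2.2, as produced by serializeSubpacketLength above.
func subpacketLengthExamples() {
	var buf [5]byte
	n := serializeSubpacketLength(buf[:], 100)  // n == 1, buf[0] == 0x64
	n = serializeSubpacketLength(buf[:], 1000)  // n == 2, buf[0], buf[1] == 0xC3, 0x28
	n = serializeSubpacketLength(buf[:], 20000) // n == 5, buf[0] == 0xFF followed by the length as 4 big-endian bytes
	_ = n
}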
// subpacketsLength returns the serialized length, in bytes, of the given
// subpackets.
func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
for _, subpacket := range subpackets {
if subpacket.hashed == hashed {
length += subpacketLengthLength(len(subpacket.contents) + 1)
length += 1 // type byte
length += len(subpacket.contents)
}
}
return
}
// serializeSubpackets marshals the given subpackets into to.
func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
for _, subpacket := range subpackets {
if subpacket.hashed == hashed {
n := serializeSubpacketLength(to, len(subpacket.contents)+1)
to[n] = byte(subpacket.subpacketType)
to = to[1+n:]
n = copy(to, subpacket.contents)
to = to[n:]
}
}
return
}
// KeyExpired returns whether sig is a self-signature of a key that has
// expired.
func (sig *Signature) KeyExpired(currentTime time.Time) bool {
if sig.KeyLifetimeSecs == nil {
return false
}
expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second)
return currentTime.After(expiry)
}
// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
func (sig *Signature) buildHashSuffix() (err error) {
hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)
var ok bool
l := 6 + hashedSubpacketsLen
sig.HashSuffix = make([]byte, l+6)
sig.HashSuffix[0] = 4
sig.HashSuffix[1] = uint8(sig.SigType)
sig.HashSuffix[2] = uint8(sig.PubKeyAlgo)
sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash)
if !ok {
sig.HashSuffix = nil
return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
}
sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8)
sig.HashSuffix[5] = byte(hashedSubpacketsLen)
serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true)
trailer := sig.HashSuffix[l:]
trailer[0] = 4
trailer[1] = 0xff
trailer[2] = byte(l >> 24)
trailer[3] = byte(l >> 16)
trailer[4] = byte(l >> 8)
trailer[5] = byte(l)
return
}
func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) {
err = sig.buildHashSuffix()
if err != nil {
return
}
h.Write(sig.HashSuffix)
digest = h.Sum(nil)
copy(sig.HashTag[:], digest)
return
}
// Sign signs a message with a private key. The hash, h, must contain
// the hash of the message to be signed and will be mutated by this function.
// On success, the signature is stored in sig. Call Serialize to write it out.
// If config is nil, sensible defaults will be used.
func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
sig.outSubpackets = sig.buildSubpackets()
digest, err := sig.signPrepareHash(h)
if err != nil {
return
}
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
// supports both *rsa.PrivateKey and crypto.Signer
sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes))
case PubKeyAlgoDSA:
dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
if len(digest) > subgroupSize {
digest = digest[:subgroupSize]
}
r, s, err := dsa.Sign(config.Random(), dsaPriv, digest)
if err == nil {
sig.DSASigR.bytes = r.Bytes()
sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes))
sig.DSASigS.bytes = s.Bytes()
sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes))
}
case PubKeyAlgoECDSA:
var r, s *big.Int
if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok {
// direct support, avoid asn1 wrapping/unwrapping
r, s, err = ecdsa.Sign(config.Random(), pk, digest)
} else {
var b []byte
b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, nil)
if err == nil {
r, s, err = unwrapECDSASig(b)
}
}
if err == nil {
sig.ECDSASigR = fromBig(r)
sig.ECDSASigS = fromBig(s)
}
default:
err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
}
return
}
// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA
// signature.
func unwrapECDSASig(b []byte) (r, s *big.Int, err error) {
var ecdsaSig struct {
R, S *big.Int
}
_, err = asn1.Unmarshal(b, &ecdsaSig)
if err != nil {
return
}
return ecdsaSig.R, ecdsaSig.S, nil
}
// SignUserId computes a signature from priv, asserting that pub is a valid
// key for the identity id. On success, the signature is stored in sig. Call
// Serialize to write it out.
// If config is nil, sensible defaults will be used.
func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
h, err := userIdSignatureHash(id, pub, sig.Hash)
if err != nil {
return err
}
return sig.Sign(h, priv, config)
}
// SignKey computes a signature from priv, asserting that pub is a subkey. On
// success, the signature is stored in sig. Call Serialize to write it out.
// If config is nil, sensible defaults will be used.
func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash)
if err != nil {
return err
}
return sig.Sign(h, priv, config)
}
// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
// called first.
func (sig *Signature) Serialize(w io.Writer) (err error) {
if len(sig.outSubpackets) == 0 {
sig.outSubpackets = sig.rawSubpackets
}
if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil {
return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
}
sigLength := 0
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
sigLength = 2 + len(sig.RSASignature.bytes)
case PubKeyAlgoDSA:
sigLength = 2 + len(sig.DSASigR.bytes)
sigLength += 2 + len(sig.DSASigS.bytes)
case PubKeyAlgoECDSA:
sigLength = 2 + len(sig.ECDSASigR.bytes)
sigLength += 2 + len(sig.ECDSASigS.bytes)
default:
panic("impossible")
}
unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
length := len(sig.HashSuffix) - 6 /* trailer not included */ +
2 /* length of unhashed subpackets */ + unhashedSubpacketsLen +
2 /* hash tag */ + sigLength
err = serializeHeader(w, packetTypeSignature, length)
if err != nil {
return
}
_, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6])
if err != nil {
return
}
unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen)
unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8)
unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)
_, err = w.Write(unhashedSubpackets)
if err != nil {
return
}
_, err = w.Write(sig.HashTag[:])
if err != nil {
return
}
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
err = writeMPIs(w, sig.RSASignature)
case PubKeyAlgoDSA:
err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
case PubKeyAlgoECDSA:
err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS)
default:
panic("impossible")
}
return
}
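Editor's note (illustrative sketch, not part of the vendored file): a detached signature is produced by filling in the metadata fields of Signature, hashing the message, and calling Sign followed by Serialize. SigTypeBinary is defined elsewhere in this package; the sketch assumes PrivateKey embeds PublicKey (so priv.PubKeyAlgo and priv.KeyId are available) and that crypto/sha256 is blank-imported somewhere in the program.
func signDetachedSketch(w io.Writer, priv *PrivateKey, msg []byte) error {
	sig := &Signature{
		SigType:      SigTypeBinary,
		PubKeyAlgo:   priv.PubKeyAlgo,
		Hash:         crypto.SHA256,
		CreationTime: time.Now(),
		IssuerKeyId:  &priv.KeyId,
	}
	h := sig.Hash.New()
	h.Write(msg)
	if err := sig.Sign(h, priv, nil); err != nil { // nil config: sensible defaults
		return err
	}
	return sig.Serialize(w)
}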
// outputSubpacket represents a subpacket to be marshaled.
type outputSubpacket struct {
hashed bool // true if this subpacket is in the hashed area.
subpacketType signatureSubpacketType
isCritical bool
contents []byte
}
func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) {
creationTime := make([]byte, 4)
binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix()))
subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime})
if sig.IssuerKeyId != nil {
keyId := make([]byte, 8)
binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
}
if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 {
sigLifetime := make([]byte, 4)
binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs)
subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime})
}
// Key flags may only appear in self-signatures or certification signatures.
if sig.FlagsValid {
var flags byte
if sig.FlagCertify {
flags |= KeyFlagCertify
}
if sig.FlagSign {
flags |= KeyFlagSign
}
if sig.FlagEncryptCommunications {
flags |= KeyFlagEncryptCommunications
}
if sig.FlagEncryptStorage {
flags |= KeyFlagEncryptStorage
}
subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
}
// The following subpackets may only appear in self-signatures
if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
keyLifetime := make([]byte, 4)
binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime})
}
if sig.IsPrimaryId != nil && *sig.IsPrimaryId {
subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}})
}
if len(sig.PreferredSymmetric) > 0 {
subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric})
}
if len(sig.PreferredHash) > 0 {
subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash})
}
if len(sig.PreferredCompression) > 0 {
subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
}
return
}

View File

@ -0,0 +1,146 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"encoding/binary"
"fmt"
"io"
"strconv"
"time"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/s2k"
)
// SignatureV3 represents older version 3 signatures. These signatures are less secure
// than version 4 and should not be used to create new signatures. They are included
// here for backwards compatibility, so that signatures made with older key material can still be read and validated.
// See RFC 4880, section 5.2.2.
type SignatureV3 struct {
SigType SignatureType
CreationTime time.Time
IssuerKeyId uint64
PubKeyAlgo PublicKeyAlgorithm
Hash crypto.Hash
HashTag [2]byte
RSASignature parsedMPI
DSASigR, DSASigS parsedMPI
}
func (sig *SignatureV3) parse(r io.Reader) (err error) {
// RFC 4880, section 5.2.2
var buf [8]byte
if _, err = readFull(r, buf[:1]); err != nil {
return
}
if buf[0] < 2 || buf[0] > 3 {
err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
return
}
if _, err = readFull(r, buf[:1]); err != nil {
return
}
if buf[0] != 5 {
err = errors.UnsupportedError(
"invalid hashed material length " + strconv.Itoa(int(buf[0])))
return
}
// Read hashed material: signature type + creation time
if _, err = readFull(r, buf[:5]); err != nil {
return
}
sig.SigType = SignatureType(buf[0])
t := binary.BigEndian.Uint32(buf[1:5])
sig.CreationTime = time.Unix(int64(t), 0)
// Eight-octet Key ID of signer.
if _, err = readFull(r, buf[:8]); err != nil {
return
}
sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:])
// Public-key and hash algorithm
if _, err = readFull(r, buf[:2]); err != nil {
return
}
sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0])
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
default:
err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
return
}
var ok bool
if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok {
return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
}
// Two-octet field holding left 16 bits of signed hash value.
if _, err = readFull(r, sig.HashTag[:2]); err != nil {
return
}
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
case PubKeyAlgoDSA:
if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil {
return
}
sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
default:
panic("unreachable")
}
return
}
// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
// called first.
func (sig *SignatureV3) Serialize(w io.Writer) (err error) {
buf := make([]byte, 8)
// Write the sig type and creation time
buf[0] = byte(sig.SigType)
binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix()))
if _, err = w.Write(buf[:5]); err != nil {
return
}
// Write the issuer long key ID
binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId)
if _, err = w.Write(buf[:8]); err != nil {
return
}
// Write public key algorithm, hash ID, and hash value
buf[0] = byte(sig.PubKeyAlgo)
hashId, ok := s2k.HashToHashId(sig.Hash)
if !ok {
return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash))
}
buf[1] = hashId
copy(buf[2:4], sig.HashTag[:])
if _, err = w.Write(buf[:4]); err != nil {
return
}
if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil {
return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
}
switch sig.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
err = writeMPIs(w, sig.RSASignature)
case PubKeyAlgoDSA:
err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
default:
panic("impossible")
}
return
}

View File

@ -0,0 +1,155 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"crypto/cipher"
"io"
"strconv"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/s2k"
)
// This is the largest session key that we'll support. Since no 512-bit cipher
// has ever been seriously used, this is comfortably large.
const maxSessionKeySizeInBytes = 64
// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
// 4880, section 5.3.
type SymmetricKeyEncrypted struct {
CipherFunc CipherFunction
s2k func(out, in []byte)
encryptedKey []byte
}
const symmetricKeyEncryptedVersion = 4
func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
// RFC 4880, section 5.3.
var buf [2]byte
if _, err := readFull(r, buf[:]); err != nil {
return err
}
if buf[0] != symmetricKeyEncryptedVersion {
return errors.UnsupportedError("SymmetricKeyEncrypted version")
}
ske.CipherFunc = CipherFunction(buf[1])
if ske.CipherFunc.KeySize() == 0 {
return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
}
var err error
ske.s2k, err = s2k.Parse(r)
if err != nil {
return err
}
encryptedKey := make([]byte, maxSessionKeySizeInBytes)
// The session key may follow. We just have to try and read to find
// out. If it exists then we limit it to maxSessionKeySizeInBytes.
n, err := readFull(r, encryptedKey)
if err != nil && err != io.ErrUnexpectedEOF {
return err
}
if n != 0 {
if n == maxSessionKeySizeInBytes {
return errors.UnsupportedError("oversized encrypted session key")
}
ske.encryptedKey = encryptedKey[:n]
}
return nil
}
// Decrypt attempts to decrypt an encrypted session key and returns the key and
// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
// packet.
func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
key := make([]byte, ske.CipherFunc.KeySize())
ske.s2k(key, passphrase)
if len(ske.encryptedKey) == 0 {
return key, ske.CipherFunc, nil
}
// the IV is all zeros
iv := make([]byte, ske.CipherFunc.blockSize())
c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
plaintextKey := make([]byte, len(ske.encryptedKey))
c.XORKeyStream(plaintextKey, ske.encryptedKey)
cipherFunc := CipherFunction(plaintextKey[0])
if cipherFunc.blockSize() == 0 {
return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
}
plaintextKey = plaintextKey[1:]
if l := len(plaintextKey); l == 0 || l%cipherFunc.blockSize() != 0 {
return nil, cipherFunc, errors.StructuralError("length of decrypted key not a multiple of block size")
}
return plaintextKey, cipherFunc, nil
}
// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The
// packet contains a random session key, encrypted by a key derived from the
// given passphrase. The session key is returned and must be passed to
// SerializeSymmetricallyEncrypted.
// If config is nil, sensible defaults will be used.
func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
cipherFunc := config.Cipher()
keySize := cipherFunc.KeySize()
if keySize == 0 {
return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
}
s2kBuf := new(bytes.Buffer)
keyEncryptingKey := make([]byte, keySize)
// s2k.Serialize salts and stretches the passphrase, and writes the
// resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()})
if err != nil {
return
}
s2kBytes := s2kBuf.Bytes()
packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize
err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
if err != nil {
return
}
var buf [2]byte
buf[0] = symmetricKeyEncryptedVersion
buf[1] = byte(cipherFunc)
_, err = w.Write(buf[:])
if err != nil {
return
}
_, err = w.Write(s2kBytes)
if err != nil {
return
}
sessionKey := make([]byte, keySize)
_, err = io.ReadFull(config.Random(), sessionKey)
if err != nil {
return
}
iv := make([]byte, cipherFunc.blockSize())
c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv)
encryptedCipherAndKey := make([]byte, keySize+1)
c.XORKeyStream(encryptedCipherAndKey, buf[1:])
c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey)
_, err = w.Write(encryptedCipherAndKey)
if err != nil {
return
}
key = sessionKey
return
}

View File

@ -0,0 +1,290 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto/cipher"
"crypto/sha1"
"crypto/subtle"
"golang.org/x/crypto/openpgp/errors"
"hash"
"io"
"strconv"
)
// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The
// encrypted contents will consist of more OpenPGP packets. See RFC 4880,
// sections 5.7 and 5.13.
type SymmetricallyEncrypted struct {
MDC bool // true iff this is a type 18 packet and thus has an embedded MAC.
contents io.Reader
prefix []byte
}
const symmetricallyEncryptedVersion = 1
func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
if se.MDC {
// See RFC 4880, section 5.13.
var buf [1]byte
_, err := readFull(r, buf[:])
if err != nil {
return err
}
if buf[0] != symmetricallyEncryptedVersion {
return errors.UnsupportedError("unknown SymmetricallyEncrypted version")
}
}
se.contents = r
return nil
}
// Decrypt returns a ReadCloser, from which the decrypted contents of the
// packet can be read. An incorrect key can, with high probability, be detected
// immediately and this will result in a KeyIncorrect error being returned.
func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
keySize := c.KeySize()
if keySize == 0 {
return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c)))
}
if len(key) != keySize {
return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
}
if se.prefix == nil {
se.prefix = make([]byte, c.blockSize()+2)
_, err := readFull(se.contents, se.prefix)
if err != nil {
return nil, err
}
} else if len(se.prefix) != c.blockSize()+2 {
return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
}
ocfbResync := OCFBResync
if se.MDC {
// MDC packets use a different form of OCFB mode.
ocfbResync = OCFBNoResync
}
s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
if s == nil {
return nil, errors.ErrKeyIncorrect
}
plaintext := cipher.StreamReader{S: s, R: se.contents}
if se.MDC {
// MDC packets have an embedded hash that we need to check.
h := sha1.New()
h.Write(se.prefix)
return &seMDCReader{in: plaintext, h: h}, nil
}
// Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
return seReader{plaintext}, nil
}
// seReader wraps an io.Reader with a no-op Close method.
type seReader struct {
in io.Reader
}
func (ser seReader) Read(buf []byte) (int, error) {
return ser.in.Read(buf)
}
func (ser seReader) Close() error {
return nil
}
const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
// MDC packet containing a hash of the previous contents which is checked
// against the running hash. See RFC 4880, section 5.13.
type seMDCReader struct {
in io.Reader
h hash.Hash
trailer [mdcTrailerSize]byte
scratch [mdcTrailerSize]byte
trailerUsed int
error bool
eof bool
}
func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
if ser.error {
err = io.ErrUnexpectedEOF
return
}
if ser.eof {
err = io.EOF
return
}
// If we haven't yet filled the trailer buffer then we must do that
// first.
for ser.trailerUsed < mdcTrailerSize {
n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
ser.trailerUsed += n
if err == io.EOF {
if ser.trailerUsed != mdcTrailerSize {
n = 0
err = io.ErrUnexpectedEOF
ser.error = true
return
}
ser.eof = true
n = 0
return
}
if err != nil {
n = 0
return
}
}
// If it's a short read then we read into a temporary buffer and shift
// the data into the caller's buffer.
if len(buf) <= mdcTrailerSize {
n, err = readFull(ser.in, ser.scratch[:len(buf)])
copy(buf, ser.trailer[:n])
ser.h.Write(buf[:n])
copy(ser.trailer[:], ser.trailer[n:])
copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
if n < len(buf) {
ser.eof = true
err = io.EOF
}
return
}
n, err = ser.in.Read(buf[mdcTrailerSize:])
copy(buf, ser.trailer[:])
ser.h.Write(buf[:n])
copy(ser.trailer[:], buf[n:])
if err == io.EOF {
ser.eof = true
}
return
}
// This is a new-format packet tag byte for a type 19 (MDC) packet.
const mdcPacketTagByte = byte(0x80) | 0x40 | 19
func (ser *seMDCReader) Close() error {
if ser.error {
return errors.SignatureError("error during reading")
}
for !ser.eof {
// We haven't seen EOF so we need to read to the end
var buf [1024]byte
_, err := ser.Read(buf[:])
if err == io.EOF {
break
}
if err != nil {
return errors.SignatureError("error during reading")
}
}
if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
return errors.SignatureError("MDC packet not found")
}
ser.h.Write(ser.trailer[:2])
final := ser.h.Sum(nil)
if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
return errors.SignatureError("hash mismatch")
}
return nil
}
// An seMDCWriter writes through to an io.WriteCloser while maintaining a running
// hash of the data written. On close, it emits an MDC packet containing the
// running hash.
type seMDCWriter struct {
w io.WriteCloser
h hash.Hash
}
func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
w.h.Write(buf)
return w.w.Write(buf)
}
func (w *seMDCWriter) Close() (err error) {
var buf [mdcTrailerSize]byte
buf[0] = mdcPacketTagByte
buf[1] = sha1.Size
w.h.Write(buf[:2])
digest := w.h.Sum(nil)
copy(buf[2:], digest)
_, err = w.w.Write(buf[:])
if err != nil {
return
}
return w.w.Close()
}
// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
type noOpCloser struct {
w io.Writer
}
func (c noOpCloser) Write(data []byte) (n int, err error) {
return c.w.Write(data)
}
func (c noOpCloser) Close() error {
return nil
}
// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
// to w and returns a WriteCloser to which the to-be-encrypted packets can be
// written.
// If config is nil, sensible defaults will be used.
func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) {
if c.KeySize() != len(key) {
return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length")
}
writeCloser := noOpCloser{w}
ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC)
if err != nil {
return
}
_, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion})
if err != nil {
return
}
block := c.new(key)
blockSize := block.BlockSize()
iv := make([]byte, blockSize)
_, err = config.Random().Read(iv)
if err != nil {
return
}
s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
_, err = ciphertext.Write(prefix)
if err != nil {
return
}
plaintext := cipher.StreamWriter{S: s, W: ciphertext}
h := sha1.New()
h.Write(iv)
h.Write(iv[blockSize-2:])
contents = &seMDCWriter{w: plaintext, h: h}
return
}
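Editor's note (illustrative sketch, not part of the vendored file): as the comments above state, SerializeSymmetricKeyEncrypted writes the passphrase-protected session key packet and returns the session key, which is then passed to SerializeSymmetricallyEncrypted. In a real message the bytes written to the returned WriteCloser would themselves be OpenPGP packets (e.g. a literal data packet); plain bytes are used here only to keep the sketch short. The sketch also assumes, as elsewhere in this package, that Config methods tolerate a nil receiver.
func encryptWithPassphraseSketch(w io.Writer, passphrase, payload []byte, config *Config) error {
	key, err := SerializeSymmetricKeyEncrypted(w, passphrase, config)
	if err != nil {
		return err
	}
	contents, err := SerializeSymmetricallyEncrypted(w, config.Cipher(), key, config)
	if err != nil {
		return err
	}
	if _, err := contents.Write(payload); err != nil {
		contents.Close()
		return err
	}
	return contents.Close() // appends the MDC trailer
}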

View File

@ -0,0 +1,91 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"image"
"image/jpeg"
"io"
"io/ioutil"
)
const UserAttrImageSubpacket = 1
// UserAttribute is capable of storing other types of data about a user
// beyond name, email and a text comment. In practice, user attributes are typically used
// to store a signed thumbnail photo JPEG image of the user.
// See RFC 4880, section 5.12.
type UserAttribute struct {
Contents []*OpaqueSubpacket
}
// NewUserAttributePhoto creates a user attribute packet
// containing the given images.
func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) {
uat = new(UserAttribute)
for _, photo := range photos {
var buf bytes.Buffer
// RFC 4880, Section 5.12.1.
data := []byte{
0x10, 0x00, // Little-endian image header length (16 bytes)
0x01, // Image header version 1
0x01, // JPEG
0, 0, 0, 0, // 12 reserved octets, must be all zero.
0, 0, 0, 0,
0, 0, 0, 0}
if _, err = buf.Write(data); err != nil {
return
}
if err = jpeg.Encode(&buf, photo, nil); err != nil {
return
}
uat.Contents = append(uat.Contents, &OpaqueSubpacket{
SubType: UserAttrImageSubpacket,
Contents: buf.Bytes()})
}
return
}
// NewUserAttribute creates a new user attribute packet containing the given subpackets.
func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
return &UserAttribute{Contents: contents}
}
func (uat *UserAttribute) parse(r io.Reader) (err error) {
// RFC 4880, section 5.12
b, err := ioutil.ReadAll(r)
if err != nil {
return
}
uat.Contents, err = OpaqueSubpackets(b)
return
}
// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
// header.
func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
var buf bytes.Buffer
for _, sp := range uat.Contents {
sp.Serialize(&buf)
}
if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
return err
}
_, err = w.Write(buf.Bytes())
return
}
// ImageData returns zero or more byte slices, each containing
// JPEG File Interchange Format (JFIF) data, one for each photo in the
// user attribute packet.
func (uat *UserAttribute) ImageData() (imageData [][]byte) {
for _, sp := range uat.Contents {
if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
imageData = append(imageData, sp.Contents[16:])
}
}
return
}
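Editor's note (illustrative sketch, not part of the vendored file): writing a photo user attribute is a two-step call using the constructors above; img is any image.Image supplied by the caller.
func writePhotoAttributeSketch(w io.Writer, img image.Image) error {
	uat, err := NewUserAttributePhoto(img) // JPEG-encodes img behind the v1 image header
	if err != nil {
		return err
	}
	return uat.Serialize(w) // emits the full user attribute packet, header included
}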

160
vendor/golang.org/x/crypto/openpgp/packet/userid.go generated vendored Normal file
View File

@ -0,0 +1,160 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"io"
"io/ioutil"
"strings"
)
// UserId contains text that is intended to represent the name and email
// address of the key holder. See RFC 4880, section 5.11. By convention, this
// takes the form "Full Name (Comment) <email@example.com>"
type UserId struct {
Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
Name, Comment, Email string
}
func hasInvalidCharacters(s string) bool {
for _, c := range s {
switch c {
case '(', ')', '<', '>', 0:
return true
}
}
return false
}
// NewUserId returns a UserId or nil if any of the arguments contain invalid
// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
func NewUserId(name, comment, email string) *UserId {
// RFC 4880 doesn't deal with the structure of userid strings; the
// name, comment and email form is just a convention. However, there's
// no convention about escaping the metacharacters and GPG just refuses
// to create user ids where, say, the name contains a '('. We mirror
// this behaviour.
if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
return nil
}
uid := new(UserId)
uid.Name, uid.Comment, uid.Email = name, comment, email
uid.Id = name
if len(comment) > 0 {
if len(uid.Id) > 0 {
uid.Id += " "
}
uid.Id += "("
uid.Id += comment
uid.Id += ")"
}
if len(email) > 0 {
if len(uid.Id) > 0 {
uid.Id += " "
}
uid.Id += "<"
uid.Id += email
uid.Id += ">"
}
return uid
}
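Editor's note (illustrative sketch, not part of the vendored file): NewUserId assembles the conventional "Full Name (Comment) <email>" form and rejects metacharacters, as described above.
func userIdSketch() {
	uid := NewUserId("Alice Example", "work", "alice@example.com")
	// uid.Id == "Alice Example (work) <alice@example.com>"
	bad := NewUserId("Alice (Example)", "", "") // nil: '(' and ')' are not allowed
	_, _ = uid, bad
}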
func (uid *UserId) parse(r io.Reader) (err error) {
// RFC 4880, section 5.11
b, err := ioutil.ReadAll(r)
if err != nil {
return
}
uid.Id = string(b)
uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
return
}
// Serialize marshals uid to w in the form of an OpenPGP packet, including
// header.
func (uid *UserId) Serialize(w io.Writer) error {
err := serializeHeader(w, packetTypeUserId, len(uid.Id))
if err != nil {
return err
}
_, err = w.Write([]byte(uid.Id))
return err
}
// parseUserId extracts the name, comment and email from a user id string that
// is formatted as "Full Name (Comment) <email@example.com>".
func parseUserId(id string) (name, comment, email string) {
var n, c, e struct {
start, end int
}
var state int
for offset, rune := range id {
switch state {
case 0:
// Entering name
n.start = offset
state = 1
fallthrough
case 1:
// In name
if rune == '(' {
state = 2
n.end = offset
} else if rune == '<' {
state = 5
n.end = offset
}
case 2:
// Entering comment
c.start = offset
state = 3
fallthrough
case 3:
// In comment
if rune == ')' {
state = 4
c.end = offset
}
case 4:
// Between comment and email
if rune == '<' {
state = 5
}
case 5:
// Entering email
e.start = offset
state = 6
fallthrough
case 6:
// In email
if rune == '>' {
state = 7
e.end = offset
}
default:
// After email
}
}
switch state {
case 1:
// ended in the name
n.end = len(id)
case 3:
// ended in comment
c.end = len(id)
case 6:
// ended in email
e.end = len(id)
}
name = strings.TrimSpace(id[n.start:n.end])
comment = strings.TrimSpace(id[c.start:c.end])
email = strings.TrimSpace(id[e.start:e.end])
return
}

442
vendor/golang.org/x/crypto/openpgp/read.go generated vendored Normal file
View File

@ -0,0 +1,442 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package openpgp implements high level operations on OpenPGP messages.
package openpgp // import "golang.org/x/crypto/openpgp"
import (
"crypto"
_ "crypto/sha256"
"hash"
"io"
"strconv"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/packet"
)
// SignatureType is the armor type for a PGP signature.
var SignatureType = "PGP SIGNATURE"
// readArmored reads an armored block with the given type.
func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
block, err := armor.Decode(r)
if err != nil {
return
}
if block.Type != expectedType {
return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
}
return block.Body, nil
}
// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
// signed message.
type MessageDetails struct {
IsEncrypted bool // true if the message was encrypted.
EncryptedToKeyIds []uint64 // the list of recipient key ids.
IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message.
DecryptedWith Key // the private key used to decrypt the message, if any.
IsSigned bool // true if the message is signed.
SignedByKeyId uint64 // the key id of the signer, if any.
SignedBy *Key // the key of the signer, if available.
LiteralData *packet.LiteralData // the metadata of the contents
UnverifiedBody io.Reader // the contents of the message.
// If IsSigned is true and SignedBy is non-zero then the signature will
// be verified as UnverifiedBody is read. The signature cannot be
// checked until the whole of UnverifiedBody is read so UnverifiedBody
// must be consumed until EOF before the data can be trusted. Even if a
// message isn't signed (or the signer is unknown) the data may contain
// an authentication code that is only checked once UnverifiedBody has
// been consumed. Once EOF has been seen, the following fields are
// valid. (An authentication code failure is reported as a
// SignatureError error when reading from UnverifiedBody.)
SignatureError error // nil if the signature is good.
Signature *packet.Signature // the signature packet itself, if v4 (default)
SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature
decrypted io.ReadCloser
}
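Editor's note (illustrative sketch, not part of the vendored file): the fields above are only trustworthy once UnverifiedBody has been read to EOF, so a typical caller of ReadMessage (defined below) drains the body before inspecting SignatureError. The sketch assumes an "io/ioutil" import; keyring and prompt come from the caller.
func decryptAndVerifySketch(r io.Reader, keyring KeyRing, prompt PromptFunction) ([]byte, error) {
	md, err := ReadMessage(r, keyring, prompt, nil)
	if err != nil {
		return nil, err
	}
	body, err := ioutil.ReadAll(md.UnverifiedBody) // must reach EOF before the checks below
	if err != nil {
		return nil, err
	}
	if md.IsSigned && md.SignatureError != nil {
		return nil, md.SignatureError
	}
	return body, nil
}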
// A PromptFunction is used as a callback by functions that may need to decrypt
// a private key, or prompt for a passphrase. It is called with a list of
// acceptable, encrypted private keys and a boolean that indicates whether a
// passphrase is usable. It should either decrypt a private key or return a
// passphrase to try. If the decrypted private key or given passphrase isn't
// correct, the function will be called again, forever. Any error returned will
// be passed up.
type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
// A keyEnvelopePair is used to store a private key with the envelope that
// contains a symmetric key, encrypted with that key.
type keyEnvelopePair struct {
key Key
encryptedKey *packet.EncryptedKey
}
// ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
// The given KeyRing should contain both public keys (for signature
// verification) and, possibly encrypted, private keys for decrypting.
// If config is nil, sensible defaults will be used.
func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) {
var p packet.Packet
var symKeys []*packet.SymmetricKeyEncrypted
var pubKeys []keyEnvelopePair
var se *packet.SymmetricallyEncrypted
packets := packet.NewReader(r)
md = new(MessageDetails)
md.IsEncrypted = true
// The message, if encrypted, starts with a number of packets
// containing an encrypted decryption key. The decryption key is either
// encrypted to a public key, or with a passphrase. This loop
// collects these packets.
ParsePackets:
for {
p, err = packets.Next()
if err != nil {
return nil, err
}
switch p := p.(type) {
case *packet.SymmetricKeyEncrypted:
// This packet contains the decryption key encrypted with a passphrase.
md.IsSymmetricallyEncrypted = true
symKeys = append(symKeys, p)
case *packet.EncryptedKey:
// This packet contains the decryption key encrypted to a public key.
md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId)
switch p.Algo {
case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal:
break
default:
continue
}
var keys []Key
if p.KeyId == 0 {
keys = keyring.DecryptionKeys()
} else {
keys = keyring.KeysById(p.KeyId)
}
for _, k := range keys {
pubKeys = append(pubKeys, keyEnvelopePair{k, p})
}
case *packet.SymmetricallyEncrypted:
se = p
break ParsePackets
case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
// This message isn't encrypted.
if len(symKeys) != 0 || len(pubKeys) != 0 {
return nil, errors.StructuralError("key material not followed by encrypted message")
}
packets.Unread(p)
return readSignedMessage(packets, nil, keyring)
}
}
var candidates []Key
var decrypted io.ReadCloser
// Now that we have the list of encrypted keys we need to decrypt at
// least one of them or, if we cannot, we need to call the prompt
// function so that it can decrypt a key or give us a passphrase.
FindKey:
for {
// See if any of the keys already have a private key available
candidates = candidates[:0]
candidateFingerprints := make(map[string]bool)
for _, pk := range pubKeys {
if pk.key.PrivateKey == nil {
continue
}
if !pk.key.PrivateKey.Encrypted {
if len(pk.encryptedKey.Key) == 0 {
pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
}
if len(pk.encryptedKey.Key) == 0 {
continue
}
decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
if err != nil && err != errors.ErrKeyIncorrect {
return nil, err
}
if decrypted != nil {
md.DecryptedWith = pk.key
break FindKey
}
} else {
fpr := string(pk.key.PublicKey.Fingerprint[:])
if v := candidateFingerprints[fpr]; v {
continue
}
candidates = append(candidates, pk.key)
candidateFingerprints[fpr] = true
}
}
if len(candidates) == 0 && len(symKeys) == 0 {
return nil, errors.ErrKeyIncorrect
}
if prompt == nil {
return nil, errors.ErrKeyIncorrect
}
passphrase, err := prompt(candidates, len(symKeys) != 0)
if err != nil {
return nil, err
}
// Try the symmetric passphrase first
if len(symKeys) != 0 && passphrase != nil {
for _, s := range symKeys {
key, cipherFunc, err := s.Decrypt(passphrase)
if err == nil {
decrypted, err = se.Decrypt(cipherFunc, key)
if err != nil && err != errors.ErrKeyIncorrect {
return nil, err
}
if decrypted != nil {
break FindKey
}
}
}
}
}
md.decrypted = decrypted
if err := packets.Push(decrypted); err != nil {
return nil, err
}
return readSignedMessage(packets, md, keyring)
}
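To make the consumption rule described in MessageDetails concrete, here is a minimal sketch (not part of the vendored file) that reads a message with ReadMessage and only trusts the plaintext after UnverifiedBody has been drained to EOF; the keyring and message paths are placeholders.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	kr, err := os.Open("keyring.gpg") // placeholder keyring with decrypted private keys
	if err != nil {
		log.Fatal(err)
	}
	defer kr.Close()
	keyring, err := openpgp.ReadKeyRing(kr)
	if err != nil {
		log.Fatal(err)
	}

	msg, err := os.Open("message.gpg") // placeholder encrypted and/or signed message
	if err != nil {
		log.Fatal(err)
	}
	defer msg.Close()

	md, err := openpgp.ReadMessage(msg, keyring, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	// The signature and MDC are only checked once UnverifiedBody hits EOF.
	plaintext, err := ioutil.ReadAll(md.UnverifiedBody)
	if err != nil {
		log.Fatal(err)
	}
	if md.IsSigned && md.SignatureError != nil {
		log.Fatal("bad signature: ", md.SignatureError)
	}
	fmt.Printf("read %d verified bytes\n", len(plaintext))
}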
// readSignedMessage reads a possibly signed message. If mdin is non-nil, that
// structure is updated and returned. Otherwise a fresh MessageDetails is
// used.
func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) {
if mdin == nil {
mdin = new(MessageDetails)
}
md = mdin
var p packet.Packet
var h hash.Hash
var wrappedHash hash.Hash
FindLiteralData:
for {
p, err = packets.Next()
if err != nil {
return nil, err
}
switch p := p.(type) {
case *packet.Compressed:
if err := packets.Push(p.Body); err != nil {
return nil, err
}
case *packet.OnePassSignature:
if !p.IsLast {
return nil, errors.UnsupportedError("nested signatures")
}
h, wrappedHash, err = hashForSignature(p.Hash, p.SigType)
if err != nil {
md = nil
return
}
md.IsSigned = true
md.SignedByKeyId = p.KeyId
keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign)
if len(keys) > 0 {
md.SignedBy = &keys[0]
}
case *packet.LiteralData:
md.LiteralData = p
break FindLiteralData
}
}
if md.SignedBy != nil {
md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md}
} else if md.decrypted != nil {
md.UnverifiedBody = checkReader{md}
} else {
md.UnverifiedBody = md.LiteralData.Body
}
return md, nil
}
// hashForSignature returns a pair of hashes that can be used to verify a
// signature. The signature may specify that the contents of the signed message
// should be preprocessed (i.e. to normalize line endings). Thus this function
// returns two hashes. The second should be used to hash the message itself and
// performs any needed preprocessing.
func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
if !hashId.Available() {
return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
}
h := hashId.New()
switch sigType {
case packet.SigTypeBinary:
return h, h, nil
case packet.SigTypeText:
return h, NewCanonicalTextHash(h), nil
}
return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
}
// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
// MDC checks.
type checkReader struct {
md *MessageDetails
}
func (cr checkReader) Read(buf []byte) (n int, err error) {
n, err = cr.md.LiteralData.Body.Read(buf)
if err == io.EOF {
mdcErr := cr.md.decrypted.Close()
if mdcErr != nil {
err = mdcErr
}
}
return
}
// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
// the data as it is read. When it sees an EOF from the underlying io.Reader
// it parses and checks a trailing Signature packet and triggers any MDC checks.
type signatureCheckReader struct {
packets *packet.Reader
h, wrappedHash hash.Hash
md *MessageDetails
}
func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) {
n, err = scr.md.LiteralData.Body.Read(buf)
scr.wrappedHash.Write(buf[:n])
if err == io.EOF {
var p packet.Packet
p, scr.md.SignatureError = scr.packets.Next()
if scr.md.SignatureError != nil {
return
}
var ok bool
if scr.md.Signature, ok = p.(*packet.Signature); ok {
scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
} else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok {
scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3)
} else {
scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature")
return
}
// The SymmetricallyEncrypted packet, if any, might have an
// unsigned hash of its own. In order to check this we need to
// close that Reader.
if scr.md.decrypted != nil {
mdcErr := scr.md.decrypted.Close()
if mdcErr != nil {
err = mdcErr
}
}
}
return
}
// CheckDetachedSignature takes a signed file and a detached signature and
// returns the signer if the signature is valid. If the signer isn't known,
// ErrUnknownIssuer is returned.
func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
var issuerKeyId uint64
var hashFunc crypto.Hash
var sigType packet.SignatureType
var keys []Key
var p packet.Packet
packets := packet.NewReader(signature)
for {
p, err = packets.Next()
if err == io.EOF {
return nil, errors.ErrUnknownIssuer
}
if err != nil {
return nil, err
}
switch sig := p.(type) {
case *packet.Signature:
if sig.IssuerKeyId == nil {
return nil, errors.StructuralError("signature doesn't have an issuer")
}
issuerKeyId = *sig.IssuerKeyId
hashFunc = sig.Hash
sigType = sig.SigType
case *packet.SignatureV3:
issuerKeyId = sig.IssuerKeyId
hashFunc = sig.Hash
sigType = sig.SigType
default:
return nil, errors.StructuralError("non signature packet found")
}
keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign)
if len(keys) > 0 {
break
}
}
if len(keys) == 0 {
panic("unreachable")
}
h, wrappedHash, err := hashForSignature(hashFunc, sigType)
if err != nil {
return nil, err
}
if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
return nil, err
}
for _, key := range keys {
switch sig := p.(type) {
case *packet.Signature:
err = key.PublicKey.VerifySignature(h, sig)
case *packet.SignatureV3:
err = key.PublicKey.VerifySignatureV3(h, sig)
default:
panic("unreachable")
}
if err == nil {
return key.Entity, nil
}
}
return nil, err
}
// CheckArmoredDetachedSignature performs the same actions as
// CheckDetachedSignature but expects the signature to be armored.
func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
body, err := readArmored(signature, SignatureType)
if err != nil {
return
}
return CheckDetachedSignature(keyring, signed, body)
}
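A minimal verification sketch (not part of the vendored file) for the detached, armored case; the keyring and file names are placeholders.

package main

import (
	"log"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	pubring, err := os.Open("pubring.gpg") // placeholder public keyring
	if err != nil {
		log.Fatal(err)
	}
	defer pubring.Close()
	keyring, err := openpgp.ReadKeyRing(pubring)
	if err != nil {
		log.Fatal(err)
	}

	signed, err := os.Open("archive.tar.gz") // placeholder signed file
	if err != nil {
		log.Fatal(err)
	}
	defer signed.Close()
	sig, err := os.Open("archive.tar.gz.asc") // placeholder armored signature
	if err != nil {
		log.Fatal(err)
	}
	defer sig.Close()

	signer, err := openpgp.CheckArmoredDetachedSignature(keyring, signed, sig)
	if err != nil {
		log.Fatal("signature check failed: ", err)
	}
	log.Printf("good signature from key %X", signer.PrimaryKey.KeyId)
}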

273
vendor/golang.org/x/crypto/openpgp/s2k/s2k.go generated vendored Normal file
View File

@ -0,0 +1,273 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package s2k implements the various OpenPGP string-to-key transforms as
// specified in RFC 4880 section 3.7.1.
package s2k // import "golang.org/x/crypto/openpgp/s2k"
import (
"crypto"
"hash"
"io"
"strconv"
"golang.org/x/crypto/openpgp/errors"
)
// Config collects configuration parameters for s2k key-stretching
// transformations. A nil *Config is valid and results in all default
// values. Currently, Config is used only by the Serialize function in
// this package.
type Config struct {
// Hash is the default hash function to be used. If
// nil, SHA1 is used.
Hash crypto.Hash
// S2KCount is only used for symmetric encryption. It
// determines the strength of the passphrase stretching when
// the said passphrase is hashed to produce a key. S2KCount
// should be between 1024 and 65011712, inclusive. If Config
// is nil or S2KCount is 0, the value 65536 is used. Not all
// values in the above range can be represented. S2KCount will
// be rounded up to the next representable value if it cannot
// be encoded exactly. When set, it is strongly encouraged to
// use a value that is at least 65536. See RFC 4880 Section
// 3.7.1.3.
S2KCount int
}
func (c *Config) hash() crypto.Hash {
if c == nil || uint(c.Hash) == 0 {
// SHA1 is the historical default in this package.
return crypto.SHA1
}
return c.Hash
}
func (c *Config) encodedCount() uint8 {
if c == nil || c.S2KCount == 0 {
return 96 // The common case. Corresponding to 65536
}
i := c.S2KCount
switch {
// Behave like GPG. Should we make 65536 the lowest value used?
case i < 1024:
i = 1024
case i > 65011712:
i = 65011712
}
return encodeCount(i)
}
// encodeCount converts an iterative "count" in the range 1024 to
// 65011712, inclusive, to an encoded count. The return value is the
// octet that is actually stored in the GPG file. encodeCount panics
// if i is not in the above range (encodedCount above takes care to
// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
func encodeCount(i int) uint8 {
if i < 1024 || i > 65011712 {
panic("count arg i outside the required range")
}
for encoded := 0; encoded < 256; encoded++ {
count := decodeCount(uint8(encoded))
if count >= i {
return uint8(encoded)
}
}
return 255
}
// decodeCount returns the s2k mode 3 iterative "count" corresponding to
// the encoded octet c.
func decodeCount(c uint8) int {
return (16 + int(c&15)) << (uint32(c>>4) + 6)
}
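As a standalone check of the formula (decodeCount is unexported, so the expression is repeated here for illustration): the default octet 96 has a low nibble of 0 and a high nibble of 6, giving (16+0) << (6+6) = 65536, the default count mentioned in encodedCount above.

package main

import "fmt"

func main() {
	c := uint8(96)
	count := (16 + int(c&15)) << (uint32(c>>4) + 6) // same expression as decodeCount
	fmt.Println(count)                              // prints 65536
}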
// Simple writes to out the result of computing the Simple S2K function (RFC
// 4880, section 3.7.1.1) using the given hash and input passphrase.
func Simple(out []byte, h hash.Hash, in []byte) {
Salted(out, h, in, nil)
}
var zero [1]byte
// Salted writes to out the result of computing the Salted S2K function (RFC
// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
done := 0
var digest []byte
for i := 0; done < len(out); i++ {
h.Reset()
for j := 0; j < i; j++ {
h.Write(zero[:])
}
h.Write(salt)
h.Write(in)
digest = h.Sum(digest[:0])
n := copy(out[done:], digest)
done += n
}
}
// Iterated writes to out the result of computing the Iterated and Salted S2K
// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
// salt and iteration count.
func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
combined := make([]byte, len(in)+len(salt))
copy(combined, salt)
copy(combined[len(salt):], in)
if count < len(combined) {
count = len(combined)
}
done := 0
var digest []byte
for i := 0; done < len(out); i++ {
h.Reset()
for j := 0; j < i; j++ {
h.Write(zero[:])
}
written := 0
for written < count {
if written+len(combined) > count {
todo := count - written
h.Write(combined[:todo])
written = count
} else {
h.Write(combined)
written += len(combined)
}
}
digest = h.Sum(digest[:0])
n := copy(out[done:], digest)
done += n
}
}
// Parse reads a binary specification for a string-to-key transformation from r
// and returns a function which performs that transform.
func Parse(r io.Reader) (f func(out, in []byte), err error) {
var buf [9]byte
_, err = io.ReadFull(r, buf[:2])
if err != nil {
return
}
hash, ok := HashIdToHash(buf[1])
if !ok {
return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1])))
}
if !hash.Available() {
return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash)))
}
h := hash.New()
switch buf[0] {
case 0:
f := func(out, in []byte) {
Simple(out, h, in)
}
return f, nil
case 1:
_, err = io.ReadFull(r, buf[:8])
if err != nil {
return
}
f := func(out, in []byte) {
Salted(out, h, in, buf[:8])
}
return f, nil
case 3:
_, err = io.ReadFull(r, buf[:9])
if err != nil {
return
}
count := decodeCount(buf[8])
f := func(out, in []byte) {
Iterated(out, h, in, buf[:8], count)
}
return f, nil
}
return nil, errors.UnsupportedError("S2K function")
}
// Serialize salts and stretches the given passphrase and writes the
// resulting key into key. It also serializes an S2K descriptor to
// w. The key stretching can be configured with c, which may be
// nil. In that case, sensible defaults will be used.
func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
var buf [11]byte
buf[0] = 3 /* iterated and salted */
buf[1], _ = HashToHashId(c.hash())
salt := buf[2:10]
if _, err := io.ReadFull(rand, salt); err != nil {
return err
}
encodedCount := c.encodedCount()
count := decodeCount(encodedCount)
buf[10] = encodedCount
if _, err := w.Write(buf[:]); err != nil {
return err
}
Iterated(key, c.hash().New(), passphrase, salt, count)
return nil
}
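A minimal sketch (not part of the vendored file) of deriving a 32-byte key from a passphrase with Serialize, using a nil Config for the defaults; the passphrase is a placeholder.

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/openpgp/s2k"
)

func main() {
	var descriptor bytes.Buffer // receives the serialized S2K specifier
	key := make([]byte, 32)
	if err := s2k.Serialize(&descriptor, key, rand.Reader, []byte("passphrase"), nil); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("descriptor: % x\nkey: % x\n", descriptor.Bytes(), key)
}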
// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with
// Go's crypto.Hash type. See RFC 4880, section 9.4.
var hashToHashIdMapping = []struct {
id byte
hash crypto.Hash
name string
}{
{1, crypto.MD5, "MD5"},
{2, crypto.SHA1, "SHA1"},
{3, crypto.RIPEMD160, "RIPEMD160"},
{8, crypto.SHA256, "SHA256"},
{9, crypto.SHA384, "SHA384"},
{10, crypto.SHA512, "SHA512"},
{11, crypto.SHA224, "SHA224"},
}
// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
// hash id.
func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
for _, m := range hashToHashIdMapping {
if m.id == id {
return m.hash, true
}
}
return 0, false
}
// HashIdToString returns the name of the hash function corresponding to the
// given OpenPGP hash id.
func HashIdToString(id byte) (name string, ok bool) {
for _, m := range hashToHashIdMapping {
if m.id == id {
return m.name, true
}
}
return "", false
}
// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
func HashToHashId(h crypto.Hash) (id byte, ok bool) {
for _, m := range hashToHashIdMapping {
if m.hash == h {
return m.id, true
}
}
return 0, false
}

378
vendor/golang.org/x/crypto/openpgp/write.go generated vendored Normal file
View File

@ -0,0 +1,378 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package openpgp
import (
"crypto"
"hash"
"io"
"strconv"
"time"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/packet"
"golang.org/x/crypto/openpgp/s2k"
)
// DetachSign signs message with the private key from signer (which must
// already have been decrypted) and writes the signature to w.
// If config is nil, sensible defaults will be used.
func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
return detachSign(w, signer, message, packet.SigTypeBinary, config)
}
// ArmoredDetachSign signs message with the private key from signer (which
// must already have been decrypted) and writes an armored signature to w.
// If config is nil, sensible defaults will be used.
func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
}
// DetachSignText signs message (after canonicalising the line endings) with
// the private key from signer (which must already have been decrypted) and
// writes the signature to w.
// If config is nil, sensible defaults will be used.
func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
return detachSign(w, signer, message, packet.SigTypeText, config)
}
// ArmoredDetachSignText signs message (after canonicalising the line endings)
// with the private key from signer (which must already have been decrypted)
// and writes an armored signature to w.
// If config is nil, sensible defaults will be used.
func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
return armoredDetachSign(w, signer, message, packet.SigTypeText, config)
}
func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
out, err := armor.Encode(w, SignatureType, nil)
if err != nil {
return
}
err = detachSign(out, signer, message, sigType, config)
if err != nil {
return
}
return out.Close()
}
func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
if signer.PrivateKey == nil {
return errors.InvalidArgumentError("signing key doesn't have a private key")
}
if signer.PrivateKey.Encrypted {
return errors.InvalidArgumentError("signing key is encrypted")
}
sig := new(packet.Signature)
sig.SigType = sigType
sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo
sig.Hash = config.Hash()
sig.CreationTime = config.Now()
sig.IssuerKeyId = &signer.PrivateKey.KeyId
h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType)
if err != nil {
return
}
io.Copy(wrappedHash, message)
err = sig.Sign(h, signer.PrivateKey, config)
if err != nil {
return
}
return sig.Serialize(w)
}
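A minimal signing sketch (not part of the vendored file) using ArmoredDetachSign; the keyring and file names are placeholders and the private key is assumed to be unencrypted, matching the requirement stated above.

package main

import (
	"log"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	secring, err := os.Open("secring.gpg") // placeholder secret keyring
	if err != nil {
		log.Fatal(err)
	}
	defer secring.Close()
	entities, err := openpgp.ReadKeyRing(secring)
	if err != nil {
		log.Fatal(err)
	}
	signer := entities[0] // assumes an unencrypted signing key

	message, err := os.Open("archive.tar.gz") // placeholder file to sign
	if err != nil {
		log.Fatal(err)
	}
	defer message.Close()
	out, err := os.Create("archive.tar.gz.asc") // placeholder signature output
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if err := openpgp.ArmoredDetachSign(out, signer, message, nil); err != nil {
		log.Fatal(err)
	}
}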
// FileHints contains metadata about encrypted files. This metadata is, itself,
// encrypted.
type FileHints struct {
// IsBinary can be set to hint that the contents are binary data.
IsBinary bool
// FileName hints at the name of the file that should be written. It's
// truncated to 255 bytes if longer. It may be empty to suggest that the
// file should not be written to disk. It may be equal to "_CONSOLE" to
// suggest the data should not be written to disk.
FileName string
// ModTime contains the modification time of the file, or the zero time if not applicable.
ModTime time.Time
}
// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase.
// The resulting WriteCloser must be closed after the contents of the file have
// been written.
// If config is nil, sensible defaults will be used.
func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
if hints == nil {
hints = &FileHints{}
}
key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config)
if err != nil {
return
}
w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config)
if err != nil {
return
}
literaldata := w
if algo := config.Compression(); algo != packet.CompressionNone {
var compConfig *packet.CompressionConfig
if config != nil {
compConfig = config.CompressionConfig
}
literaldata, err = packet.SerializeCompressed(w, algo, compConfig)
if err != nil {
return
}
}
var epochSeconds uint32
if !hints.ModTime.IsZero() {
epochSeconds = uint32(hints.ModTime.Unix())
}
return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds)
}
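A short sketch (not part of the vendored file) of the "gpg -c" analogue described above, with a placeholder passphrase and output path; note the returned WriteCloser must be closed to finalize the packets.

package main

import (
	"io"
	"log"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	out, err := os.Create("notes.txt.gpg") // placeholder ciphertext output
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	plaintext, err := openpgp.SymmetricallyEncrypt(out, []byte("correct horse"), nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.WriteString(plaintext, "hello, world\n"); err != nil {
		log.Fatal(err)
	}
	// Closing finalizes the literal-data and symmetrically-encrypted packets.
	if err := plaintext.Close(); err != nil {
		log.Fatal(err)
	}
}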
// intersectPreferences mutates and returns a prefix of a that contains only
// the values in the intersection of a and b. The order of a is preserved.
func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
var j int
for _, v := range a {
for _, v2 := range b {
if v == v2 {
a[j] = v
j++
break
}
}
}
return a[:j]
}
func hashToHashId(h crypto.Hash) uint8 {
v, ok := s2k.HashToHashId(h)
if !ok {
panic("tried to convert unknown hash")
}
return v
}
// Encrypt encrypts a message to a number of recipients and, optionally, signs
// it. hints contains optional information, that is also encrypted, that aids
// the recipients in processing the message. The resulting WriteCloser must
// be closed after the contents of the file have been written.
// If config is nil, sensible defaults will be used.
func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
var signer *packet.PrivateKey
if signed != nil {
signKey, ok := signed.signingKey(config.Now())
if !ok {
return nil, errors.InvalidArgumentError("no valid signing keys")
}
signer = signKey.PrivateKey
if signer == nil {
return nil, errors.InvalidArgumentError("no private key in signing key")
}
if signer.Encrypted {
return nil, errors.InvalidArgumentError("signing key must be decrypted")
}
}
// These are the possible ciphers that we'll use for the message.
candidateCiphers := []uint8{
uint8(packet.CipherAES128),
uint8(packet.CipherAES256),
uint8(packet.CipherCAST5),
}
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
}
// In the event that a recipient doesn't specify any supported ciphers
// or hash functions, these are the ones that we assume that every
// implementation supports.
defaultCiphers := candidateCiphers[len(candidateCiphers)-1:]
defaultHashes := candidateHashes[len(candidateHashes)-1:]
encryptKeys := make([]Key, len(to))
for i := range to {
var ok bool
encryptKeys[i], ok = to[i].encryptionKey(config.Now())
if !ok {
return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys")
}
sig := to[i].primaryIdentity().SelfSignature
preferredSymmetric := sig.PreferredSymmetric
if len(preferredSymmetric) == 0 {
preferredSymmetric = defaultCiphers
}
preferredHashes := sig.PreferredHash
if len(preferredHashes) == 0 {
preferredHashes = defaultHashes
}
candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric)
candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
}
if len(candidateCiphers) == 0 || len(candidateHashes) == 0 {
return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms")
}
cipher := packet.CipherFunction(candidateCiphers[0])
// If the cipher specified by config is a candidate, we'll use that.
configuredCipher := config.Cipher()
for _, c := range candidateCiphers {
cipherFunc := packet.CipherFunction(c)
if cipherFunc == configuredCipher {
cipher = cipherFunc
break
}
}
var hash crypto.Hash
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
hash = h
break
}
}
// If the hash specified by config is a candidate, we'll use that.
if configuredHash := config.Hash(); configuredHash.Available() {
for _, hashId := range candidateHashes {
if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
hash = h
break
}
}
}
if hash == 0 {
hashId := candidateHashes[0]
name, ok := s2k.HashIdToString(hashId)
if !ok {
name = "#" + strconv.Itoa(int(hashId))
}
return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
}
symKey := make([]byte, cipher.KeySize())
if _, err := io.ReadFull(config.Random(), symKey); err != nil {
return nil, err
}
for _, key := range encryptKeys {
if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil {
return nil, err
}
}
encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
if err != nil {
return
}
if signer != nil {
ops := &packet.OnePassSignature{
SigType: packet.SigTypeBinary,
Hash: hash,
PubKeyAlgo: signer.PubKeyAlgo,
KeyId: signer.KeyId,
IsLast: true,
}
if err := ops.Serialize(encryptedData); err != nil {
return nil, err
}
}
if hints == nil {
hints = &FileHints{}
}
w := encryptedData
if signer != nil {
// If we need to write a signature packet after the literal
// data then we need to stop literalData from closing
// encryptedData.
w = noOpCloser{encryptedData}
}
var epochSeconds uint32
if !hints.ModTime.IsZero() {
epochSeconds = uint32(hints.ModTime.Unix())
}
literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
if err != nil {
return nil, err
}
if signer != nil {
return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil
}
return literalData, nil
}
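A minimal sketch (not part of the vendored file) of the unsigned case of Encrypt, encrypting to every entity in a public keyring; the paths are placeholders and default (nil) hints and config are used.

package main

import (
	"io"
	"log"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	pubring, err := os.Open("pubring.gpg") // placeholder recipient keyring
	if err != nil {
		log.Fatal(err)
	}
	defer pubring.Close()
	recipients, err := openpgp.ReadKeyRing(pubring)
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create("message.gpg") // placeholder ciphertext output
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// No signer, no hints, default config.
	plaintext, err := openpgp.Encrypt(out, recipients, nil, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.WriteString(plaintext, "secret contents\n"); err != nil {
		log.Fatal(err)
	}
	if err := plaintext.Close(); err != nil {
		log.Fatal(err)
	}
}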
// signatureWriter hashes the contents of a message while passing it along to
// literalData. When closed, it closes literalData, writes a signature packet
// to encryptedData and then also closes encryptedData.
type signatureWriter struct {
encryptedData io.WriteCloser
literalData io.WriteCloser
hashType crypto.Hash
h hash.Hash
signer *packet.PrivateKey
config *packet.Config
}
func (s signatureWriter) Write(data []byte) (int, error) {
s.h.Write(data)
return s.literalData.Write(data)
}
func (s signatureWriter) Close() error {
sig := &packet.Signature{
SigType: packet.SigTypeBinary,
PubKeyAlgo: s.signer.PubKeyAlgo,
Hash: s.hashType,
CreationTime: s.config.Now(),
IssuerKeyId: &s.signer.KeyId,
}
if err := sig.Sign(s.h, s.signer, s.config); err != nil {
return err
}
if err := s.literalData.Close(); err != nil {
return err
}
if err := sig.Serialize(s.encryptedData); err != nil {
return err
}
return s.encryptedData.Close()
}
// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
// TODO: we have two of these in OpenPGP packages alone. This probably needs
// to be promoted somewhere more common.
type noOpCloser struct {
w io.Writer
}
func (c noOpCloser) Write(data []byte) (n int, err error) {
return c.w.Write(data)
}
func (c noOpCloser) Close() error {
return nil
}