Update vendored dependencies

Alexander Neumann 2019-11-22 14:57:56 +01:00
parent a6e8af7e0f
commit a444731dc0
137 changed files with 17539 additions and 694 deletions

View File

@ -1,28 +0,0 @@
sudo: false
language: go
os:
- linux
env:
- ARCH=x86_64
- ARCH=i686
go:
- 1.11.x
- tip
matrix:
fast_finish: true
allow_failures:
- go: tip
addons:
apt:
packages:
- devscripts
script:
- diff -au <(gofmt -d .) <(printf "")
- diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "")
- make

View File

@ -1,15 +0,0 @@
all: checks
checks:
@go get -t ./...
@go vet ./...
@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
@go get github.com/dustin/go-humanize/...
@go get github.com/sirupsen/logrus/...
@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
@mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
@go get -u github.com/a8m/mark/...
@go get -u github.com/minio/cli/...
@go get -u golang.org/x/tools/cmd/goimports
@go get -u github.com/gernest/wow/...
@go build docs/validator.go && ./validator -m docs/API.md -t docs/checker.go.tpl

View File

@ -1,2 +0,0 @@
minio-go
Copyright 2015-2017 Minio, Inc.

View File

@ -1,50 +0,0 @@
// +build go1.7 go1.8
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017-2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"net"
"net/http"
"time"
)
// DefaultTransport - this default transport is similar to
// http.DefaultTransport but with additional param DisableCompression
// is set to true to avoid decompressing content with 'gzip' encoding.
var DefaultTransport http.RoundTripper = &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// Set this value so that the underlying transport round-tripper
// doesn't try to auto decode the body of objects with
// content-encoding set to `gzip`.
//
// Refer:
// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
DisableCompression: true,
}
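For reference (not part of this commit), the README's "Client custom settings" section lists SetCustomTransport, which is the hook for supplying a transport like the one above to a client. A minimal sketch, assuming the public play.min.io endpoint and credentials used elsewhere in this diff:

```go
package main

import (
	"log"
	"net"
	"net/http"
	"time"

	minio "github.com/minio/minio-go/v6"
)

func main() {
	// Build a transport mirroring the DefaultTransport above, with
	// DisableCompression set so gzip-encoded objects are not transparently
	// decoded on download.
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		MaxIdleConns:          100,
		MaxIdleConnsPerHost:   100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		DisableCompression:    true,
	}

	// play.min.io and its public credentials are the ones used throughout
	// this commit's README examples.
	client, err := minio.New("play.min.io", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}
	client.SetCustomTransport(transport)
}
```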

30 vendor/github.com/minio/minio-go/v6/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,30 @@
sudo: false
language: go
os:
- linux
env:
- ARCH=x86_64
go:
- 1.13.x
- tip
matrix:
fast_finish: true
allow_failures:
- go: tip
before_install:
- sudo apt-get install devscripts
- mkdir /tmp/minio
- (cd /tmp/minio; GO111MODULE=on go get github.com/minio/minio)
- sudo cp testcerts/public.crt /usr/local/share/ca-certificates/
- sudo update-ca-certificates
- MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 ${GOPATH}/bin/minio server --compat --quiet --certs-dir testcerts data 2>&1 > minio.log &
script:
- diff -au <(gofmt -d .) <(printf "")
- diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "")
- make

View File

@ -5,7 +5,7 @@
Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
### Making new releases
Tag and sign your release commit, additionally this step requires you to have access to Minio's trusted private key.
Tag and sign your release commit; additionally, this step requires access to MinIO's trusted private key.
```sh
$ export GNUPGHOME=/media/${USER}/minio/trusted
$ git tag -s 4.0.0
@ -23,11 +23,11 @@ $ grep libraryVersion api.go
Commit your changes
```
$ git commit -a -m "Update version for next release" --author "Minio Trusted <trusted@minio.io>"
$ git commit -a -m "Update version for next release" --author "MinIO Trusted <trusted@min.io>"
```
### Announce
Announce new release by adding release notes at https://github.com/minio/minio-go/releases from `trusted@minio.io` account. Release notes requires two sections `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release and Changelog contains list of all commits since the last release.
Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@min.io` account. Release notes require two sections, `highlights` and `changelog`: highlights is a bulleted list of salient features in this release, and changelog contains a list of all commits since the last release.
To generate `changelog`
```sh

22 vendor/github.com/minio/minio-go/v6/Makefile generated vendored Normal file
View File

@ -0,0 +1,22 @@
all: checks
.PHONY: examples docs
checks: vet test examples functional-test
vet:
@GO111MODULE=on go vet ./...
test:
@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
examples:
@mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
functional-test:
@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
clean:
@echo "Cleaning up all the generated files"
@find . -name '*.test' | xargs rm -fv
@find . -name '*~' | xargs rm -fv

2 vendor/github.com/minio/minio-go/v6/NOTICE generated vendored Normal file
View File

@ -0,0 +1,2 @@
minio-go
Copyright 2015-2017 MinIO, Inc.

View File

@ -1,18 +1,18 @@
# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference).
This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
This document assumes that you have a working [Go development environment](https://golang.org/doc/install).
## Download from Github
```sh
go get -u github.com/minio/minio-go
GO111MODULE=on go get github.com/minio/minio-go/v6
```
## Initialize Minio Client
Minio client requires the following four parameters specified to connect to an Amazon S3 compatible object storage.
## Initialize MinIO Client
MinIO client requires the following four parameters specified to connect to an Amazon S3 compatible object storage.
| Parameter | Description|
| :--- | :--- |
@ -26,12 +26,12 @@ Minio client requires the following four parameters specified to connect to an A
package main
import (
"github.com/minio/minio-go"
"github.com/minio/minio-go/v6"
"log"
)
func main() {
endpoint := "play.minio.io:9000"
endpoint := "play.min.io"
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
useSSL := true
@ -49,19 +49,19 @@ func main() {
## Quick Start Example - File Uploader
This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
We will use the MinIO server running at [https://play.min.io](https://play.min.io) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
### FileUploader.go
```go
package main
import (
"github.com/minio/minio-go"
"github.com/minio/minio-go/v6"
"log"
)
func main() {
endpoint := "play.minio.io:9000"
endpoint := "play.min.io"
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
useSSL := true
@ -79,8 +79,8 @@ func main() {
err = minioClient.MakeBucket(bucketName, location)
if err != nil {
// Check to see if we already own this bucket (which happens if you run this twice)
exists, err := minioClient.BucketExists(bucketName)
if err == nil && exists {
exists, errBucketExists := minioClient.BucketExists(bucketName)
if errBucketExists == nil && exists {
log.Printf("We already own %s\n", bucketName)
} else {
log.Fatalln(err)
@ -117,58 +117,58 @@ mc ls play/mymusic/
## API Reference
The full API Reference is available here.
* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference)
### API Reference : Bucket Operations
* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket)
* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets)
* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists)
* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket)
* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects)
* [`ListObjectsV2`](https://docs.min.io/docs/golang-client-api-reference#ListObjectsV2)
* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads)
### API Reference : Bucket policy Operations
* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy)
### API Reference : Bucket notification Operations
* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification)
* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension)
### API Reference : File Object Operations
* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject)
* [`FPutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FPutObjectWithContext)
* [`FGetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FGetObjectWithContext)
### API Reference : Object Operations
* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext)
* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext)
* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
* [`SelectObjectContent`](https://docs.minio.io/docs/golang-client-api-reference#SelectObjectContent)
* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject)
* [`GetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#GetObjectWithContext)
* [`PutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#PutObjectWithContext)
* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming)
* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject)
* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject)
* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject)
* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects)
* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent)
### API Reference : Presigned Operations
* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject)
* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject)
* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy)
### API Reference : Client custom settings
* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
* [`SetCustomTransport`](http://docs.min.io/docs/golang-client-api-reference#SetCustomTransport)
* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff)
## Full Examples
@ -194,7 +194,7 @@ The full API Reference is available here.
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
### Full Examples : File Object Operations
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
@ -225,9 +225,8 @@ The full API Reference is available here.
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
## Explore Further
* [Complete Documentation](https://docs.minio.io)
* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
* [Complete Documentation](https://docs.min.io)
* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference)
## Contribute
[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
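The FileUploader.go quick start shown earlier in this README diff appears only as fragments. Stitched together under the new v6 import path, the program from the README reads roughly as follows; bucket, object and file names are the README's own placeholders:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go/v6"
)

func main() {
	endpoint := "play.min.io"
	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
	useSSL := true

	// Initialize the minio client object.
	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
	if err != nil {
		log.Fatalln(err)
	}

	bucketName := "mymusic"
	location := "us-east-1"

	err = minioClient.MakeBucket(bucketName, location)
	if err != nil {
		// Check to see if we already own this bucket (which happens if you run this twice).
		exists, errBucketExists := minioClient.BucketExists(bucketName)
		if errBucketExists == nil && exists {
			log.Printf("We already own %s\n", bucketName)
		} else {
			log.Fatalln(err)
		}
	}

	// Upload a local file; the object name and file path are placeholders.
	objectName := "golden-oldies.zip"
	filePath := "/tmp/golden-oldies.zip"
	contentType := "application/zip"

	n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
}
```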

View File

@ -1,12 +1,12 @@
# Minio Go SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
# MinIO Go SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage service.
The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage service.
**Supported cloud storage:**
- AWS Signature Version 4
- Amazon S3
- Minio
- MinIO
- AWS Signature Version 2
- Google Cloud Storage (compatibility mode)
@ -14,17 +14,17 @@ The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible
- Ceph Object Gateway
- Riak CS
In this document we will learn how to install the Minio client SDK, connect to Minio, and walk through a file upload example. For the complete API and examples, please refer to the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
In this document we will learn how to install the MinIO client SDK, connect to MinIO, and walk through a file upload example. For the complete API and examples, please refer to the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference).
This document assumes that you already have a [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
This document assumes that you already have a [Go development environment](https://golang.org/doc/install).
## Download from Github
```sh
go get -u github.com/minio/minio-go
```
## Initialize Minio Client
The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage.
## Initialize MinIO Client
The MinIO client requires the following four parameters to connect to an Amazon S3 compatible object storage.
| Parameter | Description|
| :--- | :--- |
@ -38,12 +38,12 @@ The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage.
package main
import (
"github.com/minio/minio-go"
"github.com/minio/minio-go/v6"
"log"
)
func main() {
endpoint := "play.minio.io:9000"
endpoint := "play.min.io"
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
useSSL := true
@ -61,19 +61,19 @@ func main() {
## Example - File Upload
This example connects to an object storage server, creates a bucket, and uploads a file to the bucket.
In this example we use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000); feel free to use this service for development and testing. The access credentials shown in this example are open to the public.
In this example we use the MinIO server running at [https://play.min.io](https://play.min.io); feel free to use this service for development and testing. The access credentials shown in this example are open to the public.
### FileUploader.go
```go
package main
import (
"github.com/minio/minio-go"
"github.com/minio/minio-go/v6"
"log"
)
func main() {
endpoint := "play.minio.io:9000"
endpoint := "play.min.io"
accessKeyID := "Q3AM3UQ867SPQQA43P2F"
secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
useSSL := true
@ -127,60 +127,60 @@ mc ls play/mymusic/
## API Reference
The full API Reference is available here.
* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference)
### API Reference : Bucket Operations
* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket)
* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets)
* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists)
* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket)
* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects)
* [`ListObjectsV2`](https://docs.min.io/docs/golang-client-api-reference#ListObjectsV2)
* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads)
### API Reference : Bucket policy Operations
* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy)
### API Reference : Bucket notification Operations
* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification)
* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension)
### API Reference : File Object Operations
* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject)
* [`FPutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FPutObjectWithContext)
* [`FGetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#FGetObjectWithContext)
### API Reference : Object Operations
* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext)
* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext)
* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject)
* [`GetObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#GetObjectWithContext)
* [`PutObjectWithContext`](https://docs.min.io/docs/golang-client-api-reference#PutObjectWithContext)
* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming)
* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject)
* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject)
* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject)
* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects)
* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
### API Reference : Encrypted Object Operations
* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)
* [`GetEncryptedObject`](https://docs.min.io/docs/golang-client-api-reference#GetEncryptedObject)
* [`PutEncryptedObject`](https://docs.min.io/docs/golang-client-api-reference#PutEncryptedObject)
### API Reference : Presigned Operations
* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject)
* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject)
* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy)
### API Reference : Client custom settings
* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo)
* [`SetCustomTransport`](http://docs.min.io/docs/golang-client-api-reference#SetCustomTransport)
* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff)
## Full Examples
@ -202,7 +202,7 @@ mc ls play/mymusic/
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
### Full Examples : File Object Operations
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
@ -233,9 +233,8 @@ mc ls play/mymusic/
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
## Explore Further
* [Complete Documentation](https://docs.minio.io)
* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
* [Complete Documentation](https://docs.min.io)
* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference)
## Contribute
[Contributors Guide](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017, 2018 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -28,8 +28,8 @@ import (
"strings"
"time"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// DestinationInfo - type with information about the object to be

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -18,6 +18,8 @@
package minio
import (
"encoding/xml"
"io"
"net/http"
"time"
)
@ -30,6 +32,36 @@ type BucketInfo struct {
CreationDate time.Time `json:"creationDate"`
}
// StringMap represents map with custom UnmarshalXML
type StringMap map[string]string
// UnmarshalXML unmarshals the XML into a map of string to strings,
// creating a key in the map for each tag and setting its value to the
// tag's contents.
//
// The fact this function is on the pointer of StringMap is important, so
// that if m is nil it can be initialized, which is often the case if m is
// nested in another xml structure. This is also why the first thing done
// on the first line is to initialize it.
func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
*m = StringMap{}
type xmlMapEntry struct {
XMLName xml.Name
Value string `xml:",chardata"`
}
for {
var e xmlMapEntry
err := d.Decode(&e)
if err == io.EOF {
break
} else if err != nil {
return err
}
(*m)[e.XMLName.Local] = e.Value
}
return nil
}
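As a toy illustration (not from this commit) of how the new StringMap type behaves: xml.Unmarshal dispatches to the UnmarshalXML method above, so each child element becomes one key/value pair. The payload and element names below are made up for the example:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"log"

	minio "github.com/minio/minio-go/v6"
)

func main() {
	// A made-up fragment shaped like the user-metadata block a listing
	// response could carry; element names here are purely illustrative.
	payload := []byte(`<UserMetadata><X-Amz-Meta-Genre>jazz</X-Amz-Meta-Genre><Content-Language>en</Content-Language></UserMetadata>`)

	var m minio.StringMap
	if err := xml.Unmarshal(payload, &m); err != nil {
		log.Fatalln(err)
	}
	fmt.Println(m) // map[Content-Language:en X-Amz-Meta-Genre:jazz]
}
```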
// ObjectInfo container for object metadata.
type ObjectInfo struct {
// An ETag is optionally set to md5sum of an object. In case of multipart objects,
@ -41,11 +73,15 @@ type ObjectInfo struct {
LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
Size int64 `json:"size"` // Size in bytes of the object.
ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached.
// Collection of additional metadata on the object.
// eg: x-amz-meta-*, content-encoding etc.
Metadata http.Header `json:"metadata" xml:"-"`
// x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value.
UserMetadata StringMap `json:"userMetadata"`
// Owner name.
Owner struct {
DisplayName string `json:"name"`

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -60,7 +60,7 @@ type ErrorResponse struct {
//
// For example:
//
// import s3 "github.com/minio/minio-go"
// import s3 "github.com/minio/minio-go/v6"
// ...
// ...
// reader, stat, err := s3.GetObject(...)

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,7 +23,7 @@ import (
"net/http"
"net/url"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// GetBucketLifecycle - get bucket lifecycle.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2018 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,7 +23,7 @@ import (
"os"
"path/filepath"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// FGetObjectWithContext - download contents of an object to a local file.
@ -100,7 +100,7 @@ func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectNam
}
// Seek to current position for incoming reader.
objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts)
objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts)
if err != nil {
return err
}

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -27,7 +27,7 @@ import (
"sync"
"time"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// GetObject - returns an seekable, readable object.
@ -92,7 +92,7 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName
} else if req.Offset > 0 {
opts.SetRange(req.Offset, 0)
}
httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts)
if err != nil {
resCh <- getResponse{Error: err}
return
@ -173,7 +173,7 @@ func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName
} else if req.Offset > 0 { // Range is set with respect to the offset.
opts.SetRange(req.Offset, 0)
}
httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts)
if err != nil {
resCh <- getResponse{
Error: err,
@ -321,6 +321,7 @@ func (o *Object) Read(b []byte) (n int, err error) {
if o.prevErr != nil || o.isClosed {
return 0, o.prevErr
}
// Create a new request.
readReq := getRequest{
isReadOp: true,
@ -403,10 +404,13 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
defer o.mutex.Unlock()
// prevErr is error which was saved in previous operation.
if o.prevErr != nil || o.isClosed {
if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
return 0, o.prevErr
}
// Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method.
o.currOffset = offset
// Can only compare offsets to size when size has been set.
if o.objectInfoSet {
// If offset is negative than we return io.EOF.
@ -476,11 +480,9 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
o.mutex.Lock()
defer o.mutex.Unlock()
if o.prevErr != nil {
// At EOF seeking is legal allow only io.EOF, for any other errors we return.
if o.prevErr != io.EOF {
return 0, o.prevErr
}
// At EOF seeking is legal allow only io.EOF, for any other errors we return.
if o.prevErr != nil && o.prevErr != io.EOF {
return 0, o.prevErr
}
// Negative offset is valid for whence of '2'.
@ -594,13 +596,13 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<-
//
// For more information about the HTTP Range header.
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
// Validate input arguments.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, ObjectInfo{}, err
return nil, ObjectInfo{}, nil, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, ObjectInfo{}, err
return nil, ObjectInfo{}, nil, err
}
// Execute GET on objectName.
@ -611,11 +613,11 @@ func (c Client) getObject(ctx context.Context, bucketName, objectName string, op
contentSHA256Hex: emptySHA256Hex,
})
if err != nil {
return nil, ObjectInfo{}, err
return nil, ObjectInfo{}, nil, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
@ -627,7 +629,7 @@ func (c Client) getObject(ctx context.Context, bucketName, objectName string, op
date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
if err != nil {
msg := "Last-Modified time format not recognized. " + reportIssue
return nil, ObjectInfo{}, ErrorResponse{
return nil, ObjectInfo{}, nil, ErrorResponse{
Code: "InternalError",
Message: msg,
RequestID: resp.Header.Get("x-amz-request-id"),
@ -655,5 +657,5 @@ func (c Client) getObject(ctx context.Context, bucketName, objectName string, op
}
// do not close body here, caller will close
return resp.Body, objectStat, nil
return resp.Body, objectStat, resp.Header, nil
}
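The two hunks above relax the prevErr checks so that a previous io.EOF no longer blocks ReadAt or Seek. A hedged sketch of what that enables through the public GetObject API, with placeholder bucket and object names:

```go
package main

import (
	"io"
	"io/ioutil"
	"log"

	minio "github.com/minio/minio-go/v6"
)

func main() {
	client, err := minio.New("play.min.io", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}

	// "mybucket" and "myobject" are placeholders.
	obj, err := client.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	// Read through to EOF once.
	if _, err := ioutil.ReadAll(obj); err != nil {
		log.Fatalln(err)
	}

	// With the relaxed checks, a prior io.EOF no longer blocks Seek or ReadAt.
	if _, err := obj.Seek(0, io.SeekStart); err != nil {
		log.Fatalln(err)
	}
	buf := make([]byte, 16)
	if _, err := obj.ReadAt(buf, 0); err != nil && err != io.EOF {
		log.Fatalln(err)
	}
}
```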

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -22,7 +22,7 @@ import (
"net/http"
"time"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/encrypt"
)
// GetObjectOptions are used to specify additional headers or options

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,7 +23,7 @@ import (
"net/http"
"net/url"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// GetBucketPolicy - get bucket policy at a given path.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -25,7 +25,7 @@ import (
"net/url"
"strings"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// ListBuckets list all buckets owned by this authenticated user.
@ -60,9 +60,13 @@ func (c Client) ListBuckets() ([]BucketInfo, error) {
/// Bucket Read Operations.
// ListObjectsV2 lists all objects matching the objectPrefix from
// the specified bucket. If recursion is enabled it would list
// all subdirectories and all its contents.
// ListObjectsV2WithMetadata lists all objects matching the objectPrefix
// from the specified bucket. If recursion is enabled it would list
// all subdirectories and all its contents. This call adds
// UserMetadata information as well for each object.
//
// This is a MinIO extension; it will not work against other S3
// compatible object storage vendors.
//
// Your input parameters are just bucketName, objectPrefix, recursive
// and a done channel for pro-actively closing the internal go
@ -76,11 +80,18 @@ func (c Client) ListBuckets() ([]BucketInfo, error) {
// defer close(doneCh)
// // Recursively list all objects in 'mytestbucket'
// recursive := true
// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
// // Add metadata
// metadata := true
// for message := range api.ListObjectsV2WithMetadata("mytestbucket", "starthere", recursive, doneCh) {
// fmt.Println(message)
// }
//
func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
func (c Client) ListObjectsV2WithMetadata(bucketName, objectPrefix string, recursive bool,
doneCh <-chan struct{}) <-chan ObjectInfo {
return c.listObjectsV2(bucketName, objectPrefix, recursive, true, doneCh)
}
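A short usage sketch (not part of this commit) of the new ListObjectsV2WithMetadata call documented above, printing the per-object UserMetadata that the metadata-enabled listing returns; the bucket name and prefix follow the doc comment's example:

```go
package main

import (
	"fmt"
	"log"

	minio "github.com/minio/minio-go/v6"
)

func main() {
	client, err := minio.New("play.min.io", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}

	doneCh := make(chan struct{})
	defer close(doneCh)

	// Recursively list objects under the prefix and print the user metadata
	// returned by the metadata-enabled listing.
	for object := range client.ListObjectsV2WithMetadata("mytestbucket", "starthere", true, doneCh) {
		if object.Err != nil {
			log.Fatalln(object.Err)
		}
		fmt.Println(object.Key, object.UserMetadata)
	}
}
```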
func (c Client) listObjectsV2(bucketName, objectPrefix string, recursive, metadata bool, doneCh <-chan struct{}) <-chan ObjectInfo {
// Allocate new list objects channel.
objectStatCh := make(chan ObjectInfo, 1)
// Default listing is delimited at "/"
@ -118,7 +129,8 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
var continuationToken string
for {
// Get list of objects a maximum of 1000 per request.
result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000, "")
result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken,
fetchOwner, metadata, delimiter, 0, "")
if err != nil {
objectStatCh <- ObjectInfo{
Err: err,
@ -142,10 +154,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
for _, obj := range result.CommonPrefixes {
select {
// Send object prefixes.
case objectStatCh <- ObjectInfo{
Key: obj.Prefix,
Size: 0,
}:
case objectStatCh <- ObjectInfo{Key: obj.Prefix}:
// If receives done from the caller, return here.
case <-doneCh:
return
@ -166,6 +175,30 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
return objectStatCh
}
// ListObjectsV2 lists all objects matching the objectPrefix from
// the specified bucket. If recursion is enabled it would list
// all subdirectories and all its contents.
//
// Your input parameters are just bucketName, objectPrefix, recursive
// and a done channel for pro-actively closing the internal go
// routine. If you enable recursive as 'true' this function will
// return back all the objects in a given bucket name and object
// prefix.
//
// api := client.New(....)
// // Create a done channel.
// doneCh := make(chan struct{})
// defer close(doneCh)
// // Recursively list all objects in 'mytestbucket'
// recursive := true
// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
// fmt.Println(message)
// }
//
func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
return c.listObjectsV2(bucketName, objectPrefix, recursive, false, doneCh)
}
// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
@ -176,7 +209,8 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
// ?start-after - Specifies the key to start after when listing objects in a bucket.
func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) {
// ?metadata - Specifies if we want metadata for the objects as part of list operation.
func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) {
// Validate bucket name.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ListBucketV2Result{}, err
@ -192,6 +226,13 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s
// Always set list-type in ListObjects V2
urlValues.Set("list-type", "2")
if metadata {
urlValues.Set("metadata", "true")
}
// Always set encoding-type in ListObjects V2
urlValues.Set("encoding-type", "url")
// Set object prefix, prefix value to be set to empty is okay.
urlValues.Set("prefix", objectPrefix)
@ -208,12 +249,10 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s
urlValues.Set("fetch-owner", "true")
}
// maxkeys should default to 1000 or less.
if maxkeys == 0 || maxkeys > 1000 {
maxkeys = 1000
}
// Set max keys.
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
if maxkeys > 0 {
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
}
// Set start-after
if startAfter != "" {
@ -248,6 +287,20 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s
return listBucketResult, errors.New("Truncated response should have continuation token set")
}
for i, obj := range listBucketResult.Contents {
listBucketResult.Contents[i].Key, err = url.QueryUnescape(obj.Key)
if err != nil {
return listBucketResult, err
}
}
for i, obj := range listBucketResult.CommonPrefixes {
listBucketResult.CommonPrefixes[i].Prefix, err = url.QueryUnescape(obj.Prefix)
if err != nil {
return listBucketResult, err
}
}
// Success.
return listBucketResult, nil
}
@ -307,7 +360,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
var marker string
for {
// Get list of objects a maximum of 1000 per request.
result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000)
result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 0)
if err != nil {
objectStatCh <- ObjectInfo{
Err: err,
@ -331,12 +384,9 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
// Send all common prefixes if any.
// NOTE: prefixes are only present if the request is delimited.
for _, obj := range result.CommonPrefixes {
object := ObjectInfo{}
object.Key = obj.Prefix
object.Size = 0
select {
// Send object prefixes.
case objectStatCh <- object:
case objectStatCh <- ObjectInfo{Key: obj.Prefix}:
// If receives done from the caller, return here.
case <-doneCh:
return
@ -390,12 +440,13 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
urlValues.Set("marker", objectMarker)
}
// maxkeys should default to 1000 or less.
if maxkeys == 0 || maxkeys > 1000 {
maxkeys = 1000
}
// Set max keys.
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
if maxkeys > 0 {
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
}
// Always set encoding-type
urlValues.Set("encoding-type", "url")
// Execute GET on bucket to list objects.
resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
@ -418,6 +469,28 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
if err != nil {
return listBucketResult, err
}
for i, obj := range listBucketResult.Contents {
listBucketResult.Contents[i].Key, err = url.QueryUnescape(obj.Key)
if err != nil {
return listBucketResult, err
}
}
for i, obj := range listBucketResult.CommonPrefixes {
listBucketResult.CommonPrefixes[i].Prefix, err = url.QueryUnescape(obj.Prefix)
if err != nil {
return listBucketResult, err
}
}
if listBucketResult.NextMarker != "" {
listBucketResult.NextMarker, err = url.QueryUnescape(listBucketResult.NextMarker)
if err != nil {
return listBucketResult, err
}
}
return listBucketResult, nil
}
@ -481,16 +554,16 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
var uploadIDMarker string
for {
// list all multipart uploads.
result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0)
if err != nil {
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
return
}
// Save objectMarker and uploadIDMarker for next request.
objectMarker = result.NextKeyMarker
uploadIDMarker = result.NextUploadIDMarker
// Send all multipart uploads.
for _, obj := range result.Uploads {
// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
@ -515,12 +588,9 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
// Send all common prefixes if any.
// NOTE: prefixes are only present if the request is delimited.
for _, obj := range result.CommonPrefixes {
object := ObjectMultipartInfo{}
object.Key = obj.Prefix
object.Size = 0
select {
// Send delimited prefixes here.
case objectMultipartStatCh <- object:
case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}:
// If done channel return here.
case <-doneCh:
return
@ -567,12 +637,14 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
// Set delimiter, delimiter value to be set to empty is okay.
urlValues.Set("delimiter", delimiter)
// Always set encoding-type
urlValues.Set("encoding-type", "url")
// maxUploads should be 1000 or less.
if maxUploads == 0 || maxUploads > 1000 {
maxUploads = 1000
if maxUploads > 0 {
// Set max-uploads.
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
}
// Set max-uploads.
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
// Execute GET on bucketName to list multipart uploads.
resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
@ -595,6 +667,31 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
if err != nil {
return listMultipartUploadsResult, err
}
listMultipartUploadsResult.NextKeyMarker, err = url.QueryUnescape(listMultipartUploadsResult.NextKeyMarker)
if err != nil {
return listMultipartUploadsResult, err
}
listMultipartUploadsResult.NextUploadIDMarker, err = url.QueryUnescape(listMultipartUploadsResult.NextUploadIDMarker)
if err != nil {
return listMultipartUploadsResult, err
}
for i, obj := range listMultipartUploadsResult.Uploads {
listMultipartUploadsResult.Uploads[i].Key, err = url.QueryUnescape(obj.Key)
if err != nil {
return listMultipartUploadsResult, err
}
}
for i, obj := range listMultipartUploadsResult.CommonPrefixes {
listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = url.QueryUnescape(obj.Prefix)
if err != nil {
return listMultipartUploadsResult, err
}
}
return listMultipartUploadsResult, nil
}
@ -683,11 +780,10 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
urlValues.Set("uploadId", uploadID)
// maxParts should be 1000 or less.
if maxParts == 0 || maxParts > 1000 {
maxParts = 1000
if maxParts > 0 {
// Set max parts.
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
}
// Set max parts.
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
// Execute GET on objectName to get list of parts.
resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -21,12 +21,11 @@ import (
"bufio"
"context"
"encoding/json"
"io"
"net/http"
"net/url"
"time"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// GetBucketNotification - get bucket notification at a given path.
@ -164,13 +163,14 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
// Indicate to our routine to exit cleanly upon return.
defer close(retryDoneCh)
// Prepare urlValues to pass into the request on every loop
urlValues := make(url.Values)
urlValues.Set("prefix", prefix)
urlValues.Set("suffix", suffix)
urlValues["events"] = events
// Wait on the jitter retry loop.
for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
urlValues := make(url.Values)
urlValues.Set("prefix", prefix)
urlValues.Set("suffix", suffix)
urlValues["events"] = events
// Execute GET on bucket to list objects.
resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName,
@ -196,30 +196,33 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
// Initialize a new bufio scanner, to read line by line.
bio := bufio.NewScanner(resp.Body)
// Close the response body.
defer resp.Body.Close()
// Unmarshal each line, returns marshalled values.
for bio.Scan() {
var notificationInfo NotificationInfo
if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
// Unexpected error during json unmarshal, send
// the error to caller for actionable as needed.
notificationInfoCh <- NotificationInfo{
Err: err,
}
closeResponse(resp)
continue
}
// Send notificationInfo
select {
case notificationInfoCh <- notificationInfo:
case <-doneCh:
closeResponse(resp)
return
}
}
// Look for any underlying errors.
if err = bio.Err(); err != nil {
// For an unexpected connection drop from server, we close the body
// and re-connect.
if err == io.ErrUnexpectedEOF {
resp.Body.Close()
notificationInfoCh <- NotificationInfo{
Err: err,
}
}
// Close current connection before looping further.
closeResponse(resp)
}
}(notificationInfoCh)
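For reference, a minimal consumer of ListenBucketNotification as reworked above might look like the following; it ranges over the returned channel until doneCh is closed, and the bucket name and event names are illustrative:

```go
package main

import (
	"fmt"
	"log"

	minio "github.com/minio/minio-go/v6"
)

func main() {
	client, err := minio.New("play.min.io", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}

	doneCh := make(chan struct{})
	defer close(doneCh)

	// Bucket name and event names are illustrative.
	events := []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}
	for info := range client.ListenBucketNotification("mybucket", "", "", events, doneCh) {
		if info.Err != nil {
			log.Fatalln(info.Err)
		}
		for _, record := range info.Records {
			fmt.Println("event:", record.EventName)
		}
	}
}
```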

232 vendor/github.com/minio/minio-go/v6/api-object-lock.go generated vendored Normal file
View File

@ -0,0 +1,232 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"context"
"encoding/xml"
"fmt"
"net/http"
"net/url"
"time"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// RetentionMode - object retention mode.
type RetentionMode string
const (
// Governance - governance mode.
Governance RetentionMode = "GOVERNANCE"
// Compliance - compliance mode.
Compliance RetentionMode = "COMPLIANCE"
)
func (r RetentionMode) String() string {
return string(r)
}
// IsValid - check whether this retention mode is valid or not.
func (r RetentionMode) IsValid() bool {
return r == Governance || r == Compliance
}
// ValidityUnit - retention validity unit.
type ValidityUnit string
const (
// Days - denotes no. of days.
Days ValidityUnit = "DAYS"
// Years - denotes no. of years.
Years ValidityUnit = "YEARS"
)
func (unit ValidityUnit) String() string {
return string(unit)
}
// IsValid - check whether this validity unit is valid or not.
func (unit ValidityUnit) isValid() bool {
return unit == Days || unit == Years
}
// Retention - bucket level retention configuration.
type Retention struct {
Mode RetentionMode
Validity time.Duration
}
func (r Retention) String() string {
return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity)
}
// IsEmpty - returns whether retention is empty or not.
func (r Retention) IsEmpty() bool {
return r.Mode == "" || r.Validity == 0
}
// objectLockConfig - object lock configuration specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
type objectLockConfig struct {
XMLNS string `xml:"xmlns,attr,omitempty"`
XMLName xml.Name `xml:"ObjectLockConfiguration"`
ObjectLockEnabled string `xml:"ObjectLockEnabled"`
Rule *struct {
DefaultRetention struct {
Mode RetentionMode `xml:"Mode"`
Days *uint `xml:"Days"`
Years *uint `xml:"Years"`
} `xml:"DefaultRetention"`
} `xml:"Rule,omitempty"`
}
func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) {
config := &objectLockConfig{
ObjectLockEnabled: "Enabled",
}
if mode != nil && validity != nil && unit != nil {
if !mode.IsValid() {
return nil, fmt.Errorf("invalid retention mode `%v`", mode)
}
if !unit.isValid() {
return nil, fmt.Errorf("invalid validity unit `%v`", unit)
}
config.Rule = &struct {
DefaultRetention struct {
Mode RetentionMode `xml:"Mode"`
Days *uint `xml:"Days"`
Years *uint `xml:"Years"`
} `xml:"DefaultRetention"`
}{}
config.Rule.DefaultRetention.Mode = *mode
if *unit == Days {
config.Rule.DefaultRetention.Days = validity
} else {
config.Rule.DefaultRetention.Years = validity
}
return config, nil
}
if mode == nil && validity == nil && unit == nil {
return config, nil
}
return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed")
}
// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil.
func (c Client) SetBucketObjectLockConfig(bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("object-lock", "")
config, err := newObjectLockConfig(mode, validity, unit)
if err != nil {
return err
}
configData, err := xml.Marshal(config)
if err != nil {
return err
}
reqMetadata := requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentBody: bytes.NewReader(configData),
contentLength: int64(len(configData)),
contentMD5Base64: sumMD5Base64(configData),
contentSHA256Hex: sum256Hex(configData),
}
// Execute PUT bucket object lock configuration.
resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp, bucketName, "")
}
}
return nil
}
// GetBucketObjectLockConfig gets object lock configuration of given bucket.
func (c Client) GetBucketObjectLockConfig(bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, nil, nil, err
}
urlValues := make(url.Values)
urlValues.Set("object-lock", "")
// Execute GET on bucket to list objects.
resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
return nil, nil, nil, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "")
}
}
config := &objectLockConfig{}
if err = xml.NewDecoder(resp.Body).Decode(config); err != nil {
return nil, nil, nil, err
}
if config.Rule != nil {
mode = &config.Rule.DefaultRetention.Mode
if config.Rule.DefaultRetention.Days != nil {
validity = config.Rule.DefaultRetention.Days
days := Days
unit = &days
} else {
validity = config.Rule.DefaultRetention.Years
years := Years
unit = &years
}
return mode, validity, unit, nil
}
return nil, nil, nil, nil
}
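A minimal usage sketch of the bucket object lock API added above; the endpoint, credentials and bucket name are placeholders, and the bucket is assumed to have been created with object locking enabled (see MakeBucketWithObjectLock later in this commit):

package main

import (
    "log"

    minio "github.com/minio/minio-go/v6"
)

func main() {
    // Placeholder endpoint and credentials.
    client, err := minio.New("play.min.io", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        log.Fatalln(err)
    }

    // Default retention: GOVERNANCE mode for 30 days.
    mode := minio.Governance
    validity := uint(30)
    unit := minio.Days
    if err := client.SetBucketObjectLockConfig("locked-bucket", &mode, &validity, &unit); err != nil {
        log.Fatalln(err)
    }

    // Read the configuration back.
    m, v, u, err := client.GetBucketObjectLockConfig("locked-bucket")
    if err != nil {
        log.Fatalln(err)
    }
    log.Printf("mode=%v validity=%d unit=%v", *m, *v, *u)
}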

View File

@ -0,0 +1,168 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"context"
"encoding/xml"
"fmt"
"net/http"
"net/url"
"time"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// objectRetention - object retention specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
type objectRetention struct {
XMLNS string `xml:"xmlns,attr,omitempty"`
XMLName xml.Name `xml:"Retention"`
Mode RetentionMode `xml:"Mode"`
RetainUntilDate time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate"`
}
func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) {
if mode == nil {
return nil, fmt.Errorf("Mode not set")
}
if date == nil {
return nil, fmt.Errorf("RetainUntilDate not set")
}
if !mode.IsValid() {
return nil, fmt.Errorf("invalid retention mode `%v`", mode)
}
objectRetention := &objectRetention{
Mode: *mode,
RetainUntilDate: *date,
}
return objectRetention, nil
}
// PutObjectRetentionOptions represents options specified by user for PutObjectRetention call
type PutObjectRetentionOptions struct {
GovernanceBypass bool
Mode *RetentionMode
RetainUntilDate *time.Time
VersionID string
}
// PutObjectRetention : sets object retention for a given object and versionID.
func (c Client) PutObjectRetention(bucketName, objectName string, opts PutObjectRetentionOptions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("retention", "")
if opts.VersionID != "" {
urlValues.Set("versionId", opts.VersionID)
}
retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate)
if err != nil {
return err
}
retentionData, err := xml.Marshal(retention)
if err != nil {
return err
}
// Build headers.
headers := make(http.Header)
if opts.GovernanceBypass {
// Set the bypass governance retention header
headers.Set("x-amz-bypass-governance-retention", "True")
}
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentBody: bytes.NewReader(retentionData),
contentLength: int64(len(retentionData)),
contentMD5Base64: sumMD5Base64(retentionData),
contentSHA256Hex: sum256Hex(retentionData),
customHeader: headers,
}
// Execute PUT Object Retention.
resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp, bucketName, objectName)
}
}
return nil
}
// GetObjectRetention gets retention of given object.
func (c Client) GetObjectRetention(bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, nil, err
}
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, nil, err
}
urlValues := make(url.Values)
urlValues.Set("retention", "")
if versionID != "" {
urlValues.Set("versionId", versionID)
}
// Execute GET on bucket to list objects.
resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
return nil, nil, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
retention := &objectRetention{}
if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil {
return nil, nil, err
}
return &retention.Mode, &retention.RetainUntilDate, nil
}
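A short sketch of how the per-object retention calls above might be used; c is assumed to be an already constructed *minio.Client, the bucket and object names are placeholders, and the usual "fmt", "time" and v6 package imports are assumed:

// setAndReadRetention pins an object under GOVERNANCE retention for 30 days,
// then reads the retention back.
func setAndReadRetention(c *minio.Client, bucket, object string) error {
    mode := minio.Governance
    until := time.Now().UTC().Add(30 * 24 * time.Hour)
    err := c.PutObjectRetention(bucket, object, minio.PutObjectRetentionOptions{
        Mode:            &mode,
        RetainUntilDate: &until,
    })
    if err != nil {
        return err
    }
    gotMode, gotUntil, err := c.GetObjectRetention(bucket, object, "")
    if err != nil {
        return err
    }
    fmt.Printf("retention: %v until %v\n", *gotMode, *gotUntil)
    return nil
}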

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,8 +23,8 @@ import (
"net/url"
"time"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3signer"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// presignURL - Returns a presigned URL for an input 'method'.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -26,19 +26,12 @@ import (
"net/url"
"strings"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
/// Bucket operations
// MakeBucket creates a new bucket with bucketName.
//
// Location is an optional argument, by default all buckets are
// created in US Standard Region.
//
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
func (c Client) MakeBucket(bucketName string, location string) (err error) {
func (c Client) makeBucket(bucketName string, location string, objectLockEnabled bool) (err error) {
defer func() {
// Save the location into cache on a successful makeBucket response.
if err == nil {
@ -66,6 +59,12 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
bucketLocation: location,
}
if objectLockEnabled {
headers := make(http.Header)
headers.Add("x-amz-bucket-object-lock-enabled", "true")
reqMetadata.customHeader = headers
}
// If location is not 'us-east-1' create bucket location config.
if location != "us-east-1" && location != "" {
createBucketConfig := createBucketConfiguration{}
@ -98,6 +97,28 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
return nil
}
// MakeBucket creates a new bucket with bucketName.
//
// Location is an optional argument, by default all buckets are
// created in US Standard Region.
//
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
func (c Client) MakeBucket(bucketName string, location string) (err error) {
return c.makeBucket(bucketName, location, false)
}
// MakeBucketWithObjectLock creates a new bucket with object locking enabled, with bucketName.
//
// Location is an optional argument, by default all buckets are
// created in US Standard Region.
//
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
func (c Client) MakeBucketWithObjectLock(bucketName string, location string) (err error) {
return c.makeBucket(bucketName, location, true)
}
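A minimal sketch of the new helper above, assuming c is a configured *minio.Client; the bucket names and region are placeholders:

// newLockedBucket creates a bucket with object locking enabled, which is
// needed before default retention can be configured on it.
func newLockedBucket(c *minio.Client) error {
    if err := c.MakeBucketWithObjectLock("locked-bucket", "us-east-1"); err != nil {
        return err
    }
    // A plain bucket, by comparison, still goes through MakeBucket.
    return c.MakeBucket("plain-bucket", "us-east-1")
}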
// SetBucketPolicy set the access permissions on an existing bucket.
func (c Client) SetBucketPolicy(bucketName, policy string) error {
// Input validation.
@ -304,3 +325,59 @@ func (c Client) SetBucketNotification(bucketName string, bucketNotification Buck
func (c Client) RemoveAllBucketNotification(bucketName string) error {
return c.SetBucketNotification(bucketName, BucketNotification{})
}
var (
versionEnableConfig = []byte("<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Status>Enabled</Status></VersioningConfiguration>")
versionEnableConfigLen = int64(len(versionEnableConfig))
versionEnableConfigMD5Sum = sumMD5Base64(versionEnableConfig)
versionEnableConfigSHA256 = sum256Hex(versionEnableConfig)
versionDisableConfig = []byte("<VersioningConfiguration xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Status>Suspended</Status></VersioningConfiguration>")
versionDisableConfigLen = int64(len(versionDisableConfig))
versionDisableConfigMD5Sum = sumMD5Base64(versionDisableConfig)
versionDisableConfigSHA256 = sum256Hex(versionDisableConfig)
)
func (c Client) setVersioning(bucketName string, config []byte, length int64, md5sum, sha256sum string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
urlValues.Set("versioning", "")
reqMetadata := requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentBody: bytes.NewReader(config),
contentLength: length,
contentMD5Base64: md5sum,
contentSHA256Hex: sha256sum,
}
// Execute PUT to set a bucket versioning.
resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return httpRespToErrorResponse(resp, bucketName, "")
}
}
return nil
}
// EnableVersioning - Enable object versioning in given bucket.
func (c Client) EnableVersioning(bucketName string) error {
return c.setVersioning(bucketName, versionEnableConfig, versionEnableConfigLen, versionEnableConfigMD5Sum, versionEnableConfigSHA256)
}
// DisableVersioning - Disable object versioning in given bucket.
func (c Client) DisableVersioning(bucketName string) error {
return c.setVersioning(bucketName, versionDisableConfig, versionDisableConfigLen, versionDisableConfigMD5Sum, versionDisableConfigSHA256)
}
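And a sketch of the new versioning toggles, again assuming an existing *minio.Client c and a placeholder bucket name:

// toggleVersioning enables object versioning on a bucket and later
// suspends it again.
func toggleVersioning(c *minio.Client, bucket string) error {
    if err := c.EnableVersioning(bucket); err != nil {
        return err
    }
    // ... upload versioned objects here ...
    return c.DisableVersioning(bucket)
}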

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,7 +23,7 @@ import (
"math"
"os"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// Verify if reader is *minio.Object
@ -34,26 +34,25 @@ func isObject(reader io.Reader) (ok bool) {
// Verify if reader is a generic ReaderAt
func isReadAt(reader io.Reader) (ok bool) {
_, ok = reader.(io.ReaderAt)
var v *os.File
v, ok = reader.(*os.File)
if ok {
var v *os.File
v, ok = reader.(*os.File)
if ok {
// Stdin, Stdout and Stderr all have *os.File type
// which happen to also be io.ReaderAt compatible
// we need to add special conditions for them to
// be ignored by this function.
for _, f := range []string{
"/dev/stdin",
"/dev/stdout",
"/dev/stderr",
} {
if f == v.Name() {
ok = false
break
}
// Stdin, Stdout and Stderr all have *os.File type
// which happen to also be io.ReaderAt compatible
// we need to add special conditions for them to
// be ignored by this function.
for _, f := range []string{
"/dev/stdin",
"/dev/stdout",
"/dev/stderr",
} {
if f == v.Name() {
ok = false
break
}
}
} else {
_, ok = reader.(io.ReaderAt)
}
return
}
@ -65,23 +64,51 @@ func isReadAt(reader io.Reader) (ok bool) {
// object storage it will have the following parameters as constants.
//
// maxPartsCount - 10000
// minPartSize - 64MiB
// minPartSize - 128MiB
// maxMultipartPutObjectSize - 5TiB
//
func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
func optimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
// object size is '-1' set it to 5TiB.
if objectSize == -1 {
objectSize = maxMultipartPutObjectSize
}
// object size is larger than supported maximum.
if objectSize > maxMultipartPutObjectSize {
err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
return
}
// Use floats for part size for all calculations to avoid
// overflows during float64 to int64 conversions.
partSizeFlt := math.Ceil(float64(objectSize / maxPartsCount))
partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
var partSizeFlt float64
if configuredPartSize > 0 {
if int64(configuredPartSize) > objectSize {
err = ErrEntityTooLarge(int64(configuredPartSize), objectSize, "", "")
return
}
if objectSize > (int64(configuredPartSize) * maxPartsCount) {
err = ErrInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.")
return
}
if configuredPartSize < absMinPartSize {
err = ErrInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.")
return
}
if configuredPartSize > maxPartSize {
err = ErrInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.")
return
}
partSizeFlt = float64(configuredPartSize)
} else {
configuredPartSize = minPartSize
// Use floats for part size for all calculations to avoid
// overflows during float64 to int64 conversions.
partSizeFlt = float64(objectSize / maxPartsCount)
partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize)
}
// Total parts count.
totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
// Part size.
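As a rough worked example of the sizing logic above (assuming this version's defaults of a 128 MiB minimum part size and 10000 parts): a stream of unknown length is treated as 5 TiB, 5 TiB / 10000 ≈ 524 MiB, which rounds up to the next multiple of 128 MiB, giving 640 MiB parts and 8192 parts in total. A caller can override the computed size via the new PartSize option; the helper below is a hypothetical sketch, assuming the "io" and v6 imports:

// uploadWithPartSize forces 16 MiB parts instead of the computed default.
func uploadWithPartSize(c *minio.Client, bucket, object string, r io.Reader) (int64, error) {
    return c.PutObject(bucket, object, r, -1, minio.PutObjectOptions{
        ContentType: "application/octet-stream",
        PartSize:    16 * 1024 * 1024, // must stay within the 5 MiB .. 5 GiB bounds checked above
    })
}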

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017, 2018 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,7 +23,7 @@ import (
"io/ioutil"
"net/http"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/encrypt"
)
// CopyObject - copy a source object into a new object

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,7 +23,7 @@ import (
"os"
"path/filepath"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -33,8 +33,8 @@ import (
"strconv"
"strings"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
@ -73,7 +73,7 @@ func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obje
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(-1)
totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize)
if err != nil {
return 0, err
}

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -25,7 +25,7 @@ import (
"sort"
"strings"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// putObjectMultipartStream - upload a large object using
@ -75,7 +75,7 @@ type uploadPartReq struct {
Part *ObjectPart // Size of the part uploaded.
}
// putObjectMultipartFromReadAt - Uploads files bigger than 64MiB.
// putObjectMultipartFromReadAt - Uploads files bigger than 128MiB.
// Supports all readers which implements io.ReaderAt interface
// (ReadAt method).
//
@ -97,7 +97,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketNa
}
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize)
if err != nil {
return 0, err
}
@ -240,7 +240,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketNa
}
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size, opts.PartSize)
if err != nil {
return 0, err
}

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -25,9 +25,10 @@ import (
"net/http"
"runtime/debug"
"sort"
"time"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/s3utils"
"golang.org/x/net/http/httpguts"
)
@ -40,10 +41,13 @@ type PutObjectOptions struct {
ContentDisposition string
ContentLanguage string
CacheControl string
Mode *RetentionMode
RetainUntilDate *time.Time
ServerSideEncryption encrypt.ServerSide
NumThreads uint
StorageClass string
WebsiteRedirectLocation string
PartSize uint64
}
// getNumThreads - gets the number of threads to be used in the multipart
@ -79,6 +83,14 @@ func (opts PutObjectOptions) Header() (header http.Header) {
if opts.CacheControl != "" {
header["Cache-Control"] = []string{opts.CacheControl}
}
if opts.Mode != nil {
header["x-amz-object-lock-mode"] = []string{opts.Mode.String()}
}
if opts.RetainUntilDate != nil {
header["x-amz-object-lock-retain-until-date"] = []string{opts.RetainUntilDate.Format(time.RFC3339)}
}
if opts.ServerSideEncryption != nil {
opts.ServerSideEncryption.Marshal(header)
}
@ -108,6 +120,11 @@ func (opts PutObjectOptions) validate() (err error) {
return ErrInvalidArgument(v + " unsupported user defined metadata value")
}
}
if opts.Mode != nil {
if !opts.Mode.IsValid() {
return ErrInvalidArgument(opts.Mode.String() + " unsupported retention mode")
}
}
return nil
}
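A hedged sketch of how the new retention fields on PutObjectOptions could be used at upload time; c, the bucket and object names are assumed to exist, and the date is arbitrary:

// putWithRetention uploads data and locks it in COMPLIANCE mode until
// the given date, using the header mapping added above.
func putWithRetention(c *minio.Client, bucket, object string, r io.Reader, size int64) error {
    mode := minio.Compliance
    until := time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC)
    _, err := c.PutObject(bucket, object, r, size, minio.PutObjectOptions{
        ContentType:     "application/octet-stream",
        Mode:            &mode,
        RetainUntilDate: &until,
    })
    return err
}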
@ -123,9 +140,9 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
//
// You must have WRITE permissions on a bucket to create an object.
//
// - For size smaller than 64MiB PutObject automatically does a
// - For size smaller than 128MiB PutObject automatically does a
// single atomic Put operation.
// - For size larger than 64MiB PutObject automatically does a
// - For size larger than 128MiB PutObject automatically does a
// multipart Put operation.
// - For size input as -1 PutObject does a multipart Put operation
// until input stream reaches EOF. Maximum object size that can
@ -147,8 +164,13 @@ func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName stri
return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
partSize := opts.PartSize
if opts.PartSize == 0 {
partSize = minPartSize
}
if c.overrideSignerType.IsV2() {
if size >= 0 && size < minPartSize {
if size >= 0 && size < int64(partSize) {
return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts)
@ -157,10 +179,11 @@ func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName stri
return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
}
if size < minPartSize {
if size < int64(partSize) {
return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
// For all sizes greater than 64MiB do multipart.
// For all sizes greater than 128MiB do multipart.
return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
}
@ -181,7 +204,7 @@ func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(-1)
totalPartsCount, partSize, _, err := optimalPartInfo(-1, opts.PartSize)
if err != nil {
return 0, err
}

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -25,7 +25,7 @@ import (
"net/http"
"net/url"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// RemoveBucket deletes the bucket name.
@ -60,6 +60,17 @@ func (c Client) RemoveBucket(bucketName string) error {
// RemoveObject remove an object from a bucket.
func (c Client) RemoveObject(bucketName, objectName string) error {
return c.RemoveObjectWithOptions(bucketName, objectName, RemoveObjectOptions{})
}
// RemoveObjectOptions represents options specified by user for RemoveObject call
type RemoveObjectOptions struct {
GovernanceBypass bool
VersionID string
}
// RemoveObjectWithOptions removes an object from a bucket.
func (c Client) RemoveObjectWithOptions(bucketName, objectName string, opts RemoveObjectOptions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@ -67,11 +78,29 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
if opts.VersionID != "" {
urlValues.Set("versionId", opts.VersionID)
}
// Build headers.
headers := make(http.Header)
if opts.GovernanceBypass {
// Set the bypass governance retention header
headers.Set("x-amz-bypass-governance-retention", "True")
}
// Execute DELETE on objectName.
resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
bucketName: bucketName,
objectName: objectName,
contentSHA256Hex: emptySHA256Hex,
queryValues: urlValues,
customHeader: headers,
})
defer closeResponse(resp)
if err != nil {

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2018 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -31,8 +31,8 @@ import (
"net/url"
"strings"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// CSVFileHeaderInfo - is the parameter for whether to utilize headers.
@ -90,19 +90,19 @@ type ParquetInputOptions struct{}
type CSVInputOptions struct {
FileHeaderInfo CSVFileHeaderInfo
RecordDelimiter string
FieldDelimiter string
QuoteCharacter string
QuoteEscapeCharacter string
Comments string
FieldDelimiter string `xml:",omitempty"`
QuoteCharacter string `xml:",omitempty"`
QuoteEscapeCharacter string `xml:",omitempty"`
Comments string `xml:",omitempty"`
}
// CSVOutputOptions csv output specific options
type CSVOutputOptions struct {
QuoteFields CSVQuoteFields
QuoteFields CSVQuoteFields `xml:",omitempty"`
RecordDelimiter string
FieldDelimiter string
QuoteCharacter string
QuoteEscapeCharacter string
FieldDelimiter string `xml:",omitempty"`
QuoteCharacter string `xml:",omitempty"`
QuoteEscapeCharacter string `xml:",omitempty"`
}
// JSONInputOptions json input specific options
@ -251,6 +251,12 @@ func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName
return nil, err
}
return NewSelectResults(resp, bucketName)
}
// NewSelectResults creates a Select Result parser that parses the response
// and returns a Reader that will return parsed and assembled select output.
func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) {
if resp.StatusCode != http.StatusOK {
return nil, httpRespToErrorResponse(resp, bucketName, "")
}
@ -325,7 +331,7 @@ func (s *SelectResults) start(pipeWriter *io.PipeWriter) {
switch m {
case errorMsg:
pipeWriter.CloseWithError(errors.New("Error Type of " + headers.Get("error-type") + " " + headers.Get("error-message")))
pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\""))
closeResponse(s.resp)
return
case commonMsg:

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -24,7 +24,7 @@ import (
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// BucketExists verify if bucket exists and you have permission to access it.
@ -84,6 +84,7 @@ func extractObjMetadata(header http.Header) http.Header {
"Content-Length",
"Last-Modified",
"Content-Type",
"Expires",
}, defaultFilterKeys...)
return filterHeader(header, filterKeys)
}
@ -170,6 +171,22 @@ func (c Client) statObject(ctx context.Context, bucketName, objectName string, o
contentType = "application/octet-stream"
}
expiryStr := resp.Header.Get("Expires")
var expTime time.Time
if t, err := time.Parse(http.TimeFormat, expiryStr); err == nil {
expTime = t.UTC()
}
metadata := extractObjMetadata(resp.Header)
userMetadata := map[string]string{}
const xamzmeta = "x-amz-meta-"
const xamzmetaLen = len(xamzmeta)
for k, v := range metadata {
if strings.HasPrefix(strings.ToLower(k), xamzmeta) {
userMetadata[k[xamzmetaLen:]] = v[0]
}
}
// Save object metadata info.
return ObjectInfo{
ETag: md5sum,
@ -177,9 +194,11 @@ func (c Client) statObject(ctx context.Context, bucketName, objectName string, o
Size: size,
LastModified: date,
ContentType: contentType,
Expires: expTime,
// Extract only the relevant header keys describing the object.
// following function filters out a list of standard set of keys
// which are not part of object metadata.
Metadata: extractObjMetadata(resp.Header),
Metadata: metadata,
UserMetadata: userMetadata,
}, nil
}

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2018 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -21,7 +21,6 @@ import (
"bytes"
"context"
"crypto/md5"
"crypto/sha256"
"errors"
"fmt"
"hash"
@ -39,11 +38,13 @@ import (
"sync"
"time"
"github.com/minio/sha256-simd"
"golang.org/x/net/publicsuffix"
"github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/credentials"
"github.com/minio/minio-go/v6/pkg/s3signer"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// Client implements Amazon S3 compatible methods.
@ -73,8 +74,9 @@ type Client struct {
bucketLocCache *bucketLocationCache
// Advanced functionality.
isTraceEnabled bool
traceOutput io.Writer
isTraceEnabled bool
traceErrorsOnly bool
traceOutput io.Writer
// S3 specific accelerated endpoint.
s3AccelerateEndpoint string
@ -102,15 +104,15 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v6.0.14"
libraryVersion = "v6.0.43"
)
// User Agent should always following the below style.
// Please open an issue to discuss any new changes here.
//
// Minio (OS; ARCH) LIB/VER APP/VER
// MinIO (OS; ARCH) LIB/VER APP/VER
const (
libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)
@ -186,6 +188,12 @@ func NewWithOptions(endpoint string, opts *Options) (*Client, error) {
return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup)
}
// EndpointURL returns the URL of the S3 endpoint.
func (c *Client) EndpointURL() *url.URL {
endpoint := *c.endpointURL // copy to prevent callers from modifying internal state
return &endpoint
}
// lockedRandSource provides protected rand source, implements rand.Source interface.
type lockedRandSource struct {
lk sync.Mutex
@ -263,7 +271,7 @@ func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error {
case signerType.IsV2():
return errors.New("signature V2 cannot support redirection")
case signerType.IsV4():
req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
}
}
return nil
@ -295,10 +303,15 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re
// Save endpoint URL, user agent for future uses.
clnt.endpointURL = endpointURL
transport, err := DefaultTransport(secure)
if err != nil {
return nil, err
}
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
Jar: jar,
Transport: DefaultTransport,
Transport: transport,
CheckRedirect: clnt.redirectHeaders,
}
@ -325,10 +338,6 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re
func (c *Client) SetAppInfo(appName string, appVersion string) {
// if app name and version not set, we do not set a new user agent.
if appName != "" && appVersion != "" {
c.appInfo = struct {
appName string
appVersion string
}{}
c.appInfo.appName = appName
c.appInfo.appVersion = appVersion
}
@ -368,10 +377,23 @@ func (c *Client) TraceOn(outputStream io.Writer) {
c.isTraceEnabled = true
}
// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced.
func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) {
c.TraceOn(outputStream)
c.traceErrorsOnly = true
}
// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call.
// If all tracing needs to be turned off, call TraceOff().
func (c *Client) TraceErrorsOnlyOff() {
c.traceErrorsOnly = false
}
// TraceOff - disable HTTP tracing.
func (c *Client) TraceOff() {
// Disable tracing.
c.isTraceEnabled = false
c.traceErrorsOnly = false
}
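A sketch of the new error-only tracing toggle; os.Stderr is just one possible sink, and c is an existing *minio.Client:

// traceFailures enables HTTP wire dumps only for responses that are not 200 OK.
func traceFailures(c *minio.Client) {
    c.TraceErrorsOnlyOn(os.Stderr)
    // c.TraceErrorsOnlyOff() would widen this to all requests again,
    // and c.TraceOff() disables tracing entirely.
}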
// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
@ -511,8 +533,9 @@ func (c Client) do(req *http.Request) (*http.Response, error) {
return nil, ErrInvalidArgument(msg)
}
// If trace is enabled, dump http request and response.
if c.isTraceEnabled {
// If trace is enabled, dump http request and response,
// except when the traceErrorsOnly enabled and the response's status code is ok
if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) {
err = c.dumpHTTP(req, resp)
if err != nil {
return nil, err
@ -637,14 +660,30 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque
//
// Additionally we should only retry if bucketLocation and custom
// region is empty.
if metadata.bucketLocation == "" && c.region == "" {
if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" {
if c.region == "" {
switch errResponse.Code {
case "AuthorizationHeaderMalformed":
fallthrough
case "InvalidRegion":
fallthrough
case "AccessDenied":
if metadata.bucketName != "" && errResponse.Region != "" {
// Gather Cached location only if bucketName is present.
if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false {
if _, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk {
c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
continue // Retry.
}
} else {
// Most probably for ListBuckets()
if errResponse.Region != metadata.bucketLocation {
// Retry if the error
// response has a
// different region
// than the request we
// just made.
metadata.bucketLocation = errResponse.Region
continue // Retry
}
}
}
}
@ -678,13 +717,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
// Gather location only if bucketName is present.
location, err = c.getBucketLocation(metadata.bucketName)
if err != nil {
if ToErrorResponse(err).Code != "AccessDenied" {
return nil, err
}
return nil, err
}
// Upon AccessDenied error on fetching bucket location, default
// to possible locations based on endpoint URL. This can usually
// happen when GetBucketLocation() is disabled using IAM policies.
}
if location == "" {
location = getDefaultLocation(*c.endpointURL, c.region)
@ -692,10 +726,14 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
}
// Look if target url supports virtual host.
isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName)
// We explicitly disallow MakeBucket calls to not use virtual DNS style,
// since the resolution may fail.
isMakeBucket := (metadata.objectName == "" && method == "PUT" && len(metadata.queryValues) == 0)
isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket
// Construct a new target URL.
targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues)
targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location,
isVirtualHost, metadata.queryValues)
if err != nil {
return nil, err
}

View File

@ -9,28 +9,24 @@ clone_folder: c:\gopath\src\github.com\minio\minio-go
# environment variables
environment:
GOPATH: c:\gopath
GO15VENDOREXPERIMENT: 1
GO111MODULE: on
# scripts that run after cloning repository
install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- go version
- go env
- go get -u golang.org/x/lint/golint
- go get -u github.com/remyoudompheng/go-misc/deadcode
- go get -u github.com/gordonklaus/ineffassign
- go get -u golang.org/x/crypto/argon2
- go get -t ./...
- go get golang.org/x/lint/golint
- go get honnef.co/go/tools/cmd/staticcheck
# to run your custom scripts instead of automatic MSBuild
build_script:
- go vet ./...
- gofmt -s -l .
- golint -set_exit_status github.com/minio/minio-go...
- deadcode
- ineffassign .
- go test -short -v
- go test -short -race -v
- golint -set_exit_status github.com/minio/minio-go/...
- staticcheck
- go test -short -v ./...
- go test -short -race -v ./...
# to disable automatic tests
test: off

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -18,14 +18,15 @@
package minio
import (
"net"
"net/http"
"net/url"
"path"
"sync"
"github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/credentials"
"github.com/minio/minio-go/v6/pkg/s3signer"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// bucketLocationCache - Provides simple mechanism to hold bucket
@ -123,8 +124,16 @@ func processBucketLocationResponse(resp *http.Response, bucketName string) (buck
// For access denied error, it could be an anonymous
// request. Move forward and let the top level callers
// succeed if possible based on their policy.
if errResp.Code == "AccessDenied" {
return "us-east-1", nil
switch errResp.Code {
case "AuthorizationHeaderMalformed":
fallthrough
case "InvalidRegion":
fallthrough
case "AccessDenied":
if errResp.Region == "" {
return "us-east-1", nil
}
return errResp.Region, nil
}
return "", err
}
@ -161,7 +170,15 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
urlValues.Set("location", "")
// Set get bucket location always as path style.
targetURL := c.endpointURL
targetURL := *c.endpointURL
// as it works in makeTargetURL method from api.go file
if h, p, err := net.SplitHostPort(targetURL.Host); err == nil {
if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" {
targetURL.Host = h
}
}
targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode()

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -20,7 +20,7 @@ package minio
import (
"encoding/xml"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
)
// NotificationEventType is a S3 notification event associated to the bucket notification configuration

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,9 +23,9 @@ package minio
// a part in a multipart upload may not be uploaded.
const absMinPartSize = 1024 * 1024 * 5
// minPartSize - minimum part size 64MiB per object after which
// minPartSize - minimum part size 128MiB per object after which
// putObject behaves internally as multipart.
const minPartSize = 1024 * 1024 * 64
const minPartSize = 1024 * 1024 * 128
// maxPartsCount - maximum number of parts for a single multipart session.
const maxPartsCount = 10000

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -20,9 +20,10 @@ package minio
import (
"context"
"io"
"net/http"
"strings"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/encrypt"
)
// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
@ -52,12 +53,26 @@ func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int)
// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
// continuationToken instead of marker to support iteration over the results.
func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) {
return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys, startAfter)
return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, false, delimiter, maxkeys, startAfter)
}
// CopyObjectWithContext - copies an object from source object to destination object on server side.
func (c Core) CopyObjectWithContext(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) {
return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata)
}
// CopyObject - copies an object from source object to destination object on server side.
func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) {
return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata)
return c.CopyObjectWithContext(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata)
}
// CopyObjectPartWithContext - creates a part in a multipart upload by copying (a
// part of) an existing object.
func (c Core) CopyObjectPartWithContext(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID,
partID, startOffset, length, metadata)
}
// CopyObjectPart - creates a part in a multipart upload by copying (a
@ -65,12 +80,12 @@ func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject stri
func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string,
partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
return c.copyObjectPartDo(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID,
return c.CopyObjectPartWithContext(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID,
partID, startOffset, length, metadata)
}
// PutObject - Upload object. Uploads using single PUT call.
func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) {
// PutObjectWithContext - Upload object. Uploads using single PUT call.
func (c Core) PutObjectWithContext(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) {
opts := PutObjectOptions{}
m := make(map[string]string)
for k, v := range metadata {
@ -84,7 +99,7 @@ func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Ba
opts.ContentType = v
} else if strings.ToLower(k) == "cache-control" {
opts.CacheControl = v
} else if strings.ToLower(k) == strings.ToLower(amzWebsiteRedirectLocation) {
} else if strings.EqualFold(k, amzWebsiteRedirectLocation) {
opts.WebsiteRedirectLocation = v
} else {
m[k] = metadata[k]
@ -92,7 +107,12 @@ func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Ba
}
opts.UserMetadata = m
opts.ServerSideEncryption = sse
return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts)
return c.putObjectDo(ctx, bucket, object, data, md5Base64, sha256Hex, size, opts)
}
// PutObject - Upload object. Uploads using single PUT call.
func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) {
return c.PutObjectWithContext(context.Background(), bucket, object, data, size, md5Base64, sha256Hex, metadata, sse)
}
// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
@ -106,9 +126,14 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de
return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
}
// PutObjectPartWithContext - Upload an object part.
func (c Core) PutObjectPartWithContext(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse)
}
// PutObjectPart - Upload an object part.
func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse)
return c.PutObjectPartWithContext(context.Background(), bucket, object, uploadID, partID, data, size, md5Base64, sha256Hex, sse)
}
// ListObjectParts - List uploaded parts of an incomplete upload.
@ -116,17 +141,27 @@ func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker
return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)
}
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) (string, error) {
res, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{
// CompleteMultipartUploadWithContext - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUploadWithContext(ctx context.Context, bucket, object, uploadID string, parts []CompletePart) (string, error) {
res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{
Parts: parts,
})
return res.ETag, err
}
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) (string, error) {
return c.CompleteMultipartUploadWithContext(context.Background(), bucket, object, uploadID, parts)
}
// AbortMultipartUploadWithContext - Abort an incomplete upload.
func (c Core) AbortMultipartUploadWithContext(ctx context.Context, bucket, object, uploadID string) error {
return c.abortMultipartUpload(ctx, bucket, object, uploadID)
}
// AbortMultipartUpload - Abort an incomplete upload.
func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
return c.abortMultipartUpload(context.Background(), bucket, object, uploadID)
return c.AbortMultipartUploadWithContext(context.Background(), bucket, object, uploadID)
}
// GetBucketPolicy - fetches bucket access policy for a given bucket.
@ -139,15 +174,28 @@ func (c Core) PutBucketPolicy(bucket, bucketPolicy string) error {
return c.putBucketPolicy(bucket, bucketPolicy)
}
// GetObjectWithContext is a lower level API implemented to support reading
// partial objects and also downloading objects with special conditions
// matching etag, modtime etc.
func (c Core) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
return c.getObject(ctx, bucketName, objectName, opts)
}
// GetObject is a lower level API implemented to support reading
// partial objects and also downloading objects with special conditions
// matching etag, modtime etc.
func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
return c.getObject(context.Background(), bucketName, objectName, opts)
func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
return c.GetObjectWithContext(context.Background(), bucketName, objectName, opts)
}
// StatObjectWithContext is a lower level API implemented to support special
// conditions matching etag, modtime on a request.
func (c Core) StatObjectWithContext(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
return c.statObject(ctx, bucketName, objectName, opts)
}
// StatObject is a lower level API implemented to support special
// conditions matching etag, modtime on a request.
func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
return c.statObject(context.Background(), bucketName, objectName, opts)
return c.StatObjectWithContext(context.Background(), bucketName, objectName, opts)
}

14
vendor/github.com/minio/minio-go/v6/go.mod generated vendored Normal file
View File

@ -0,0 +1,14 @@
module github.com/minio/minio-go/v6
go 1.12
require (
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/minio/sha256-simd v0.1.1
github.com/mitchellh/go-homedir v1.1.0
github.com/sirupsen/logrus v1.4.2 // indirect
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f
golang.org/x/net v0.0.0-20190522155817-f3200d17e092
gopkg.in/ini.v1 v1.42.0
)
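With the module now rooted at /v6, consumers pull it in and import it under the versioned path, roughly like this minimal sketch (the require version mirrors the library version constant in this diff; endpoint and credentials are placeholders):

// In the consumer's go.mod:
//     require github.com/minio/minio-go/v6 v6.0.43

package main

import (
    minio "github.com/minio/minio-go/v6" // was github.com/minio/minio-go
)

func main() {
    _, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true)
    if err != nil {
        panic(err)
    }
}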

41
vendor/github.com/minio/minio-go/v6/go.sum generated vendored Normal file
View File

@ -0,0 +1,41 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo=
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk=
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -17,7 +17,10 @@
package minio
import "io"
import (
"fmt"
"io"
)
// hookReader hooks additional reader in the source stream. It is
// useful for making progress bars. Second reader is appropriately
@ -34,12 +37,23 @@ func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
// Verify for source has embedded Seeker, use it.
sourceSeeker, ok := hr.source.(io.Seeker)
if ok {
return sourceSeeker.Seek(offset, whence)
n, err = sourceSeeker.Seek(offset, whence)
if err != nil {
return 0, err
}
}
// Verify if hook has embedded Seeker, use it.
hookSeeker, ok := hr.hook.(io.Seeker)
if ok {
return hookSeeker.Seek(offset, whence)
var m int64
m, err = hookSeeker.Seek(offset, whence)
if err != nil {
return 0, err
}
if n != m {
return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n)
}
}
return n, nil
}
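The reworked Seek above forwards the call to both the source reader and the hook and insists that the two land on the same offset. A small, self-contained sketch of that pattern; the type and names here are hypothetical, not minio-go's unexported hookReader:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// mirroredReader mirrors every Seek to a progress hook and fails loudly if
// the two readers ever disagree on the resulting offset.
type mirroredReader struct {
	source io.ReadSeeker
	hook   io.ReadSeeker
}

func (m *mirroredReader) Seek(offset int64, whence int) (int64, error) {
	n, err := m.source.Seek(offset, whence)
	if err != nil {
		return 0, err
	}
	h, err := m.hook.Seek(offset, whence)
	if err != nil {
		return 0, err
	}
	if n != h {
		return 0, fmt.Errorf("hook seeked to %d, source to %d", h, n)
	}
	return n, nil
}

func main() {
	r := &mirroredReader{
		source: strings.NewReader("payload"),
		hook:   strings.NewReader("1234567"), // same length; stands in for a progress bar
	}
	pos, err := r.Seek(3, io.SeekStart)
	fmt.Println(pos, err) // 3 <nil>
}
```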

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -2,7 +2,7 @@
"version": "8",
"hosts": {
"play": {
"url": "https://play.minio.io:9000",
"url": "https://play.min.io",
"accessKey": "Q3AM3UQ867SPQQA43P2F",
"secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
"api": "S3v2"

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -21,8 +21,8 @@ import (
"os"
"path/filepath"
"github.com/go-ini/ini"
homedir "github.com/mitchellh/go-homedir"
ini "gopkg.in/ini.v1"
)
// A FileAWSCredentials retrieves credentials from the current user's home

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -40,7 +40,7 @@ type FileMinioClient struct {
// Windows: "%USERALIAS%\mc\config.json"
filename string
// Minio Alias to extract credentials from the shared credentials file. If empty
// MinIO Alias to extract credentials from the shared credentials file. If empty
// will default to environment variable "MINIO_ALIAS" or "default" if
// environment variable is also not set.
alias string

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -53,7 +53,7 @@ type IAM struct {
const (
defaultIAMRoleEndpoint = "http://169.254.169.254"
defaultECSRoleEndpoint = "http://169.254.170.2"
defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials"
defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
)
// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -43,7 +43,7 @@ type AssumeRoleWithClientGrantsResponse struct {
}
// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants
// request, including temporary credentials that can be used to make Minio API requests.
// request, including temporary credentials that can be used to make MinIO API requests.
type ClientGrantsResult struct {
AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
Audience string `xml:",omitempty"`
@ -60,30 +60,19 @@ type ClientGrantsResult struct {
// ClientGrantsToken - client grants token with expiry.
type ClientGrantsToken struct {
token string
expiry int
Token string
Expiry int
}
// Token - access token returned after authenticating client grants.
func (c *ClientGrantsToken) Token() string {
return c.token
}
// Expiry - expiry for the access token returned after authenticating
// client grants.
func (c *ClientGrantsToken) Expiry() string {
return fmt.Sprintf("%d", c.expiry)
}
// A STSClientGrants retrieves credentials from Minio service, and keeps track if
// A STSClientGrants retrieves credentials from MinIO service, and keeps track if
// those credentials are expired.
type STSClientGrants struct {
Expiry
// Required http Client to use when connecting to Minio STS service.
// Required http Client to use when connecting to MinIO STS service.
Client *http.Client
// Minio endpoint to fetch STS credentials.
// MinIO endpoint to fetch STS credentials.
stsEndpoint string
// getClientGrantsTokenExpiry function to retrieve tokens
@ -123,8 +112,8 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
v := url.Values{}
v.Set("Action", "AssumeRoleWithClientGrants")
v.Set("Token", accessToken.Token())
v.Set("DurationSeconds", accessToken.Expiry())
v.Set("Token", accessToken.Token)
v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry))
v.Set("Version", "2011-06-15")
u, err := url.Parse(endpoint)
@ -153,7 +142,7 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
return a, nil
}
// Retrieve retrieves credentials from the Minio service.
// Retrieve retrieves credentials from the MinIO service.
// Error will be returned if the request fails.
func (m *STSClientGrants) Retrieve() (Value, error) {
a, err := getClientGrantsCredentials(m.Client, m.stsEndpoint, m.getClientGrantsTokenExpiry)

View File

@ -0,0 +1,119 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"encoding/xml"
"errors"
"net/http"
"net/url"
"time"
)
// AssumeRoleWithLDAPResponse contains the result of successful
// AssumeRoleWithLDAPIdentity request
type AssumeRoleWithLDAPResponse struct {
XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"`
Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"`
ResponseMetadata struct {
RequestID string `xml:"RequestId,omitempty"`
} `xml:"ResponseMetadata,omitempty"`
}
// LDAPIdentityResult - contains credentials for a successful
// AssumeRoleWithLDAPIdentity request.
type LDAPIdentityResult struct {
Credentials struct {
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
} `xml:",omitempty"`
SubjectFromToken string `xml:",omitempty"`
}
// LDAPIdentity retrieves credentials from MinIO
type LDAPIdentity struct {
Expiry
stsEndpoint string
ldapUsername, ldapPassword string
}
// NewLDAPIdentity returns new credentials object that uses LDAP
// Identity.
func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string) (*Credentials, error) {
return New(&LDAPIdentity{
stsEndpoint: stsEndpoint,
ldapUsername: ldapUsername,
ldapPassword: ldapPassword,
}), nil
}
// Retrieve gets the credential by calling the MinIO STS API for
// LDAP on the configured stsEndpoint.
func (k *LDAPIdentity) Retrieve() (value Value, err error) {
u, kerr := url.Parse(k.stsEndpoint)
if kerr != nil {
err = kerr
return
}
clnt := &http.Client{Transport: http.DefaultTransport}
v := url.Values{}
v.Set("Action", "AssumeRoleWithLDAPIdentity")
v.Set("Version", "2011-06-15")
v.Set("LDAPUsername", k.ldapUsername)
v.Set("LDAPPassword", k.ldapPassword)
u.RawQuery = v.Encode()
req, kerr := http.NewRequest("POST", u.String(), nil)
if kerr != nil {
err = kerr
return
}
resp, kerr := clnt.Do(req)
if kerr != nil {
err = kerr
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err = errors.New(resp.Status)
return
}
r := AssumeRoleWithLDAPResponse{}
if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
return
}
cr := r.Result.Credentials
k.SetExpiration(cr.Expiration, DefaultExpiryWindow)
return Value{
AccessKeyID: cr.AccessKey,
SecretAccessKey: cr.SecretKey,
SessionToken: cr.SessionToken,
SignerType: SignatureV4,
}, nil
}
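A hedged usage sketch for the new provider: NewLDAPIdentity comes straight from the file above, while minio.NewWithCredentials is assumed to match the v6 client API; the endpoint, user name and password below are placeholders.

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go/v6"
	"github.com/minio/minio-go/v6/pkg/credentials"
)

func main() {
	// Credentials are fetched lazily from the MinIO STS endpoint via
	// AssumeRoleWithLDAPIdentity and refreshed when they expire.
	creds, err := credentials.NewLDAPIdentity("https://minio.example.com", "ldap-user", "ldap-secret")
	if err != nil {
		log.Fatal(err)
	}

	client, err := minio.NewWithCredentials("minio.example.com", creds, true, "")
	if err != nil {
		log.Fatal(err)
	}

	buckets, err := client.ListBuckets()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("buckets:", len(buckets))
}
```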

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -36,7 +36,7 @@ type AssumeRoleWithWebIdentityResponse struct {
}
// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity
// request, including temporary credentials that can be used to make Minio API requests.
// request, including temporary credentials that can be used to make MinIO API requests.
type WebIdentityResult struct {
AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
Audience string `xml:",omitempty"`
@ -53,30 +53,19 @@ type WebIdentityResult struct {
// WebIdentityToken - web identity token with expiry.
type WebIdentityToken struct {
token string
expiry int
Token string
Expiry int
}
// Token - access token returned after authenticating web identity.
func (c *WebIdentityToken) Token() string {
return c.token
}
// Expiry - expiry for the access token returned after authenticating
// web identity.
func (c *WebIdentityToken) Expiry() string {
return fmt.Sprintf("%d", c.expiry)
}
// A STSWebIdentity retrieves credentials from Minio service, and keeps track if
// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if
// those credentials are expired.
type STSWebIdentity struct {
Expiry
// Required http Client to use when connecting to Minio STS service.
// Required http Client to use when connecting to MinIO STS service.
Client *http.Client
// Minio endpoint to fetch STS credentials.
// MinIO endpoint to fetch STS credentials.
stsEndpoint string
// getWebIDTokenExpiry function which returns ID tokens
@ -115,8 +104,8 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint string,
v := url.Values{}
v.Set("Action", "AssumeRoleWithWebIdentity")
v.Set("WebIdentityToken", idToken.Token())
v.Set("DurationSeconds", idToken.Expiry())
v.Set("WebIdentityToken", idToken.Token)
v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
v.Set("Version", "2011-06-15")
u, err := url.Parse(endpoint)
@ -149,7 +138,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint string,
return a, nil
}
// Retrieve retrieves credentials from the Minio service.
// Retrieve retrieves credentials from the MinIO service.
// Error will be returned if the request fails.
func (m *STSWebIdentity) Retrieve() (Value, error) {
a, err := getWebIdentityCredentials(m.Client, m.stsEndpoint, m.getWebIDTokenExpiry)

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2018 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -285,7 +285,7 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
// bytes read from baseReader different than
// content length provided.
if s.bytesRead != s.contentLen {
return 0, io.ErrUnexpectedEOF
return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
}
// Sign the chunk and write it to s.buf.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -30,7 +30,7 @@ import (
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// Signature and API related constants.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -26,7 +26,7 @@ import (
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// Signature and API related constants.
@ -151,7 +151,7 @@ func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) strin
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(v)
buf.WriteString(signV4TrimAll(v))
}
buf.WriteByte('\n')
}

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -19,8 +19,10 @@ package s3signer
import (
"crypto/hmac"
"crypto/sha256"
"net/http"
"strings"
"github.com/minio/sha256-simd"
)
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
@ -47,3 +49,11 @@ func getHostAddr(req *http.Request) string {
}
return req.URL.Host
}
// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
func signV4TrimAll(input string) string {
// Compress adjacent spaces (a space is determined by
// unicode.IsSpace() internally here) to one space and return
return strings.Join(strings.Fields(input), " ")
}
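For reference, the helper collapses interior whitespace runs to a single space and trims the ends, as the SigV4 canonical-request rules require. A minimal check of that behavior, not part of the library:

```go
package main

import (
	"fmt"
	"strings"
)

// Same one-liner as the vendored helper: strings.Fields splits on any
// Unicode whitespace, so joining with a single space both trims and
// collapses interior runs.
func signV4TrimAll(input string) string {
	return strings.Join(strings.Fields(input), " ")
}

func main() {
	fmt.Printf("%q\n", signV4TrimAll("  a   b\t c  ")) // "a b c"
}
```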

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -47,8 +47,8 @@ func IsValidDomain(host string) bool {
if host[len(host)-1:] == "_" || host[:1] == "_" {
return false
}
// host cannot start or end with a "."
if host[len(host)-1:] == "." || host[:1] == "." {
// host cannot start with a "."
if host[:1] == "." {
return false
}
// All non alphanumeric characters are invalid.
@ -282,7 +282,7 @@ func checkBucketNameCommon(bucketName string, strict bool) (err error) {
if ipAddress.MatchString(bucketName) {
return errors.New("Bucket name cannot be an ip address")
}
if strings.Contains(bucketName, "..") {
if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") {
return errors.New("Bucket name contains invalid characters")
}
if strict {
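The extra check above rejects bucket names containing "..", ".-" or "-.". A short, hedged illustration using the exported wrapper in s3utils; the exported name is assumed from the package's public API:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v6/pkg/s3utils"
)

func main() {
	for _, name := range []string{"my-bucket", "my..bucket", "my.-bucket", "my-.bucket"} {
		// Only the first name should pass; the others contain "..", ".-" or "-.".
		fmt.Println(name, s3utils.CheckValidBucketName(name))
	}
}
```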

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -111,6 +111,9 @@ func isHTTPReqErrorRetryable(err error) bool {
} else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") {
// If error is transport connection broken, retry.
return true
} else if strings.Contains(err.Error(), "net/http: timeout awaiting response headers") {
// Retry errors due to server not sending the response before timeout
return true
}
}
return false
@ -143,6 +146,7 @@ var retryableHTTPStatusCodes = map[int]struct{}{
http.StatusInternalServerError: {},
http.StatusBadGateway: {},
http.StatusServiceUnavailable: {},
http.StatusGatewayTimeout: {},
// Add more HTTP status codes here.
}

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -29,6 +29,7 @@ var awsS3EndpointMap = map[string]string{
"eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com",
"eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com",
"eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com",
"ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com",
"ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com",
"ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com",
"ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com",

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

vendor/github.com/minio/minio-go/v6/staticcheck.conf generated vendored Normal file

@ -0,0 +1 @@
checks = ["all", "-ST1005", "-ST1017", "-SA9004", "-ST1000", "-S1021"]

vendor/github.com/minio/minio-go/v6/transport.go generated vendored Normal file

@ -0,0 +1,82 @@
// +build go1.7 go1.8
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017-2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/tls"
"crypto/x509"
"net"
"net/http"
"time"
"golang.org/x/net/http2"
)
// DefaultTransport - this default transport is similar to
// http.DefaultTransport but with additional param DisableCompression
// is set to true to avoid decompressing content with 'gzip' encoding.
var DefaultTransport = func(secure bool) (http.RoundTripper, error) {
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
MaxIdleConns: 1024,
MaxIdleConnsPerHost: 1024,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// Set this value so that the underlying transport round-tripper
// doesn't try to auto decode the body of objects with
// content-encoding set to `gzip`.
//
// Refer:
// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
DisableCompression: true,
}
if secure {
rootCAs, _ := x509.SystemCertPool()
if rootCAs == nil {
// In some systems (like Windows) system cert pool is
// not supported or no certificates are present on the
// system - so we create a new cert pool.
rootCAs = x509.NewCertPool()
}
// Keep TLS config.
tlsConfig := &tls.Config{
RootCAs: rootCAs,
// Can't use SSLv3 because of POODLE and BEAST
// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
// Can't use TLSv1.1 because of RC4 cipher usage
MinVersion: tls.VersionTLS12,
}
tr.TLSClientConfig = tlsConfig
// Because we create a custom TLSClientConfig, we have to opt-in to HTTP/2.
// See https://github.com/golang/go/issues/14275
if err := http2.ConfigureTransport(tr); err != nil {
return nil, err
}
}
return tr, nil
}
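A hedged sketch of wiring this factory into a client: Client.SetCustomTransport is assumed from the v6 API, the endpoint and keys are the public play values appearing elsewhere in this diff, and the bucket name is a placeholder.

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go/v6"
)

func main() {
	// Build the transport with TLS enabled (TLS 1.2+ and HTTP/2, per the
	// factory above).
	tr, err := minio.DefaultTransport(true)
	if err != nil {
		log.Fatal(err)
	}

	client, err := minio.New("play.min.io", "Q3AM3UQ867SPQQA43P2F",
		"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatal(err)
	}
	client.SetCustomTransport(tr)

	found, err := client.BucketExists("my-bucket") // hypothetical bucket name
	if err != nil {
		log.Fatal(err)
	}
	log.Println("bucket exists:", found)
}
```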

View File

@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 Minio, Inc.
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -19,7 +19,6 @@ package minio
import (
"crypto/md5"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/xml"
@ -32,7 +31,9 @@ import (
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/sha256-simd"
"github.com/minio/minio-go/v6/pkg/s3utils"
)
// xmlDecoder provide decoded value in xml.
@ -223,13 +224,15 @@ var supportedHeaders = []string{
"content-disposition",
"content-language",
"x-amz-website-redirect-location",
"x-amz-object-lock-mode",
"x-amz-object-lock-retain-until-date",
"expires",
// Add more supported headers here.
}
// isStorageClassHeader returns true if the header is a supported storage class header
func isStorageClassHeader(headerKey string) bool {
return strings.ToLower(amzStorageClass) == strings.ToLower(headerKey)
return strings.EqualFold(amzStorageClass, headerKey)
}
// isStandardHeader returns true if header is a supported header and not a custom header

vendor/github.com/minio/sha256-simd/.gitignore generated vendored Normal file

@ -0,0 +1 @@
*.test

vendor/github.com/minio/sha256-simd/.travis.yml generated vendored Normal file

@ -0,0 +1,25 @@
sudo: required
dist: trusty
language: go
os:
- linux
go:
- tip
- 1.12.x
env:
- ARCH=x86_64
- ARCH=i686
matrix:
fast_finish: true
allow_failures:
- go: tip
script:
- diff -au <(gofmt -d .) <(printf "")
- go test -race -v ./...
- go vet -asmdecl .
- ./test-architectures.sh

vendor/github.com/minio/sha256-simd/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/minio/sha256-simd/README.md generated vendored Normal file

@ -0,0 +1,133 @@
# sha256-simd
Accelerate SHA256 computations in pure Go using AVX512, SHA Extensions and AVX2 for Intel and ARM64 for ARM. On AVX512 it provides an up to 8x improvement (over 3 GB/s per core) in comparison to AVX2. SHA Extensions give a performance boost of close to 4x over AVX2.
## Introduction
This package is designed as a replacement for `crypto/sha256`. For Intel CPUs it has two flavors for AVX512 and AVX2 (AVX/SSE are also supported). For ARM CPUs with the Cryptography Extensions, advantage is taken of the SHA2 instructions resulting in a massive performance improvement.
This package uses Golang assembly. The AVX512 version is based on Intel's "multi-buffer crypto library for IPSec" whereas the other Intel implementations are described in "Fast SHA-256 Implementations on Intel Architecture Processors" by J. Guilford et al.
## New: Support for Intel SHA Extensions
Support for the Intel SHA Extensions has been added by Kristofer Peterson (@svenski123), originally developed for spacemeshos [here](https://github.com/spacemeshos/POET/issues/23). On CPUs that support it (known thus far Intel Celeron J3455 and AMD Ryzen) it gives a significant boost in performance (with thanks to @AudriusButkevicius for reporting the results; full results [here](https://github.com/minio/sha256-simd/pull/37#issuecomment-451607827)).
```
$ benchcmp avx2.txt sha-ext.txt
benchmark AVX2 MB/s SHA Ext MB/s speedup
BenchmarkHash5M 514.40 1975.17 3.84x
```
Thanks to Kristofer Peterson, we also added further performance changes such as optimized padding and endian conversions, which sped up all implementations: the Intel SHA code path alone doubled performance for small sizes, while the other changes improved everything by roughly 50%.
## Support for AVX512
We have added support for AVX512 which results in an up to 8x performance improvement over AVX2 (3.0 GHz Xeon Platinum 8124M CPU):
```
$ benchcmp avx2.txt avx512.txt
benchmark AVX2 MB/s AVX512 MB/s speedup
BenchmarkHash5M 448.62 3498.20 7.80x
```
The original code was developed by Intel as part of the [multi-buffer crypto library](https://github.com/intel/intel-ipsec-mb) for IPSec or more specifically this [AVX512](https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm) implementation. The key idea behind it is to process a total of 16 checksums in parallel by “transposing” 16 (independent) messages of 64 bytes between a total of 16 ZMM registers (each 64 bytes wide).
Transposing the input messages means that in order to take full advantage of the speedup you need to have a (server) workload where multiple threads are doing SHA256 calculations in parallel. Unfortunately for this algorithm it is not possible for two message blocks processed in parallel to be dependent on one another, because then the (interim) result of the first part of the message has to be an input into the processing of the second part of the message.
Whereas the original Intel C implementation requires some sort of explicit scheduling of messages to be processed in parallel, for Golang it makes sense to take advantage of channels in order to group messages together and use channels as well for sending back the results (thereby effectively decoupling the calculations). We have implemented a fairly simple scheduling mechanism that seems to work well in practice.
Due to this different way of scheduling, we decided to use an explicit method to instantiate the AVX512 version. Essentially one or more AVX512 processing servers ([`Avx512Server`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L294)) have to be created, whereby each server can hash over 3 GB/s on a single core. A `hash.Hash` object ([`Avx512Digest`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L45)) is then instantiated using one of these servers and used in the regular fashion:
```go
import "github.com/minio/sha256-simd"
func main() {
server := sha256.NewAvx512Server()
h512 := sha256.NewAvx512(server)
h512.Write(fileBlock)
digest := h512.Sum([]byte{})
}
```
Note that, because of the scheduling overhead, for small messages (< 1 MB) you will be better off using the regular SHA256 hashing (but those are typically not performance critical anyway). Some other tips to get the best performance:
* Have many go routines doing SHA256 calculations in parallel.
* Try to Write() messages in multiples of 64 bytes.
* Try to keep the overall length of messages to a roughly similar size, e.g. 5 MB (this way all 16 lanes in the AVX512 computations are contributing as much as possible).
More detailed information can be found in this [blog](https://blog.minio.io/accelerate-sha256-up-to-8x-over-3-gb-s-per-core-with-avx512-a0b1d64f78f) post including scaling across cores.
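Putting the tips above together, here is a hedged sketch of hashing several large messages in parallel against a single `Avx512Server` (assuming an AVX512-capable CPU; the lane count and message sizes are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"sync"

	"github.com/minio/sha256-simd"
)

func main() {
	server := sha256.NewAvx512Server() // one scheduling server, shared by all goroutines

	var wg sync.WaitGroup
	for i := 0; i < 16; i++ { // 16 concurrent messages keep all AVX512 lanes busy
		wg.Add(1)
		go func(lane int) {
			defer wg.Done()
			h := sha256.NewAvx512(server)
			// ~5 MB message, written in 64-byte multiples as suggested above.
			msg := bytes.Repeat([]byte{byte(lane)}, 5*1024*1024)
			h.Write(msg)
			fmt.Printf("lane %2d: %x\n", lane, h.Sum(nil))
		}(i)
	}
	wg.Wait()
}
```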
## Drop-In Replacement
The following code snippet shows how you can use `github.com/minio/sha256-simd`. This will automatically select the fastest method for the architecture on which it will be executed.
```go
import "github.com/minio/sha256-simd"
func main() {
...
shaWriter := sha256.New()
io.Copy(shaWriter, file)
...
}
```
## Performance
Below is the speed in MB/s for a single core (ranked fast to slow) for blocks larger than 1 MB.
| Processor | SIMD | Speed (MB/s) |
| --------------------------------- | ------- | ------------:|
| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 |
| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 |
| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 |
| 3.0 GHz Intel Xeon Platinum 8124M | AVX2 | 449 |
| 3.1 GHz Intel Core i7 | AVX | 362 |
| 3.1 GHz Intel Core i7 | SSE | 299 |
## asm2plan9s
In order to be able to work more easily with AVX512/AVX2 instructions, a separate tool was developed to convert SIMD instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information.
## Why and benefits
One of the most performance-sensitive parts of the [Minio](https://github.com/minio/minio) object storage server is SHA256 hash sum calculation. For instance, during multipart uploads each uploaded part needs to be verified for data integrity by the server.
Other applications that can benefit from enhanced SHA256 performance are deduplication in storage systems, intrusion detection, version control systems, integrity checking, etc.
## ARM SHA Extensions
The 64-bit ARMv8 core has introduced new instructions for SHA1 and SHA2 acceleration as part of the [Cryptography Extensions](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0501f/CHDFJBCJ.html). Below you can see a small excerpt highlighting one of the rounds as is done for the SHA256 calculation process (for full code see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)).
```
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v5.4s, v6.4s
rev32 v8.16b, v8.16b
add v9.4s, v7.4s, v18.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v6.4s, v7.4s
sha256su1 v5.4s, v7.4s, v8.4s
```
### Detailed benchmarks
Benchmarks generated on a 1.2 GHz quad-core ARM Cortex-A53 equipped [Pine64](https://www.pine64.com/).
```
minio@minio-arm:$ benchcmp golang.txt arm64.txt
benchmark golang arm64 speedup
BenchmarkHash8Bytes-4 0.68 MB/s 5.70 MB/s 8.38x
BenchmarkHash1K-4 5.65 MB/s 326.30 MB/s 57.75x
BenchmarkHash8K-4 6.00 MB/s 570.63 MB/s 95.11x
BenchmarkHash1M-4 6.05 MB/s 638.23 MB/s 105.49x
```
## License
Released under the Apache License v2.0. You can find the complete text in the file LICENSE.
## Contributing
Contributions are welcome, please send PRs for any enhancements.

vendor/github.com/minio/sha256-simd/appveyor.yml generated vendored Normal file

@ -0,0 +1,32 @@
# version format
version: "{build}"
# Operating system (build VM template)
os: Windows Server 2012 R2
# Platform.
platform: x64
clone_folder: c:\gopath\src\github.com\minio\sha256-simd
# environment variables
environment:
GOPATH: c:\gopath
GO15VENDOREXPERIMENT: 1
# scripts that run after cloning repository
install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- go version
- go env
# to run your custom scripts instead of automatic MSBuild
build_script:
- go test .
- go test -race .
# to disable automatic tests
test: off
# to disable deployment
deploy: off

vendor/github.com/minio/sha256-simd/cpuid.go generated vendored Normal file

@ -0,0 +1,119 @@
// Minio Cloud Storage, (C) 2016 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package sha256
// True when SIMD instructions are available.
var avx512 bool
var avx2 bool
var avx bool
var sse bool
var sse2 bool
var sse3 bool
var ssse3 bool
var sse41 bool
var sse42 bool
var popcnt bool
var sha bool
var armSha = haveArmSha()
func init() {
var _xsave bool
var _osxsave bool
var _avx bool
var _avx2 bool
var _avx512f bool
var _avx512dq bool
// var _avx512pf bool
// var _avx512er bool
// var _avx512cd bool
var _avx512bw bool
var _avx512vl bool
var _sseState bool
var _avxState bool
var _opmaskState bool
var _zmmHI256State bool
var _hi16ZmmState bool
mfi, _, _, _ := cpuid(0)
if mfi >= 1 {
_, _, c, d := cpuid(1)
sse = (d & (1 << 25)) != 0
sse2 = (d & (1 << 26)) != 0
sse3 = (c & (1 << 0)) != 0
ssse3 = (c & (1 << 9)) != 0
sse41 = (c & (1 << 19)) != 0
sse42 = (c & (1 << 20)) != 0
popcnt = (c & (1 << 23)) != 0
_xsave = (c & (1 << 26)) != 0
_osxsave = (c & (1 << 27)) != 0
_avx = (c & (1 << 28)) != 0
}
if mfi >= 7 {
_, b, _, _ := cpuid(7)
_avx2 = (b & (1 << 5)) != 0
_avx512f = (b & (1 << 16)) != 0
_avx512dq = (b & (1 << 17)) != 0
// _avx512pf = (b & (1 << 26)) != 0
// _avx512er = (b & (1 << 27)) != 0
// _avx512cd = (b & (1 << 28)) != 0
_avx512bw = (b & (1 << 30)) != 0
_avx512vl = (b & (1 << 31)) != 0
sha = (b & (1 << 29)) != 0
}
// Stop here if XSAVE unsupported or not enabled
if !_xsave || !_osxsave {
return
}
if _xsave && _osxsave {
a, _ := xgetbv(0)
_sseState = (a & (1 << 1)) != 0
_avxState = (a & (1 << 2)) != 0
_opmaskState = (a & (1 << 5)) != 0
_zmmHI256State = (a & (1 << 6)) != 0
_hi16ZmmState = (a & (1 << 7)) != 0
} else {
_sseState = true
}
// Very unlikely that OS would enable XSAVE and then disable SSE
if !_sseState {
sse = false
sse2 = false
sse3 = false
ssse3 = false
sse41 = false
sse42 = false
}
if _avxState {
avx = _avx
avx2 = _avx2
}
if _opmaskState && _zmmHI256State && _hi16ZmmState {
avx512 = (_avx512f &&
_avx512dq &&
_avx512bw &&
_avx512vl)
}
}

vendor/github.com/minio/sha256-simd/cpuid_386.go generated vendored Normal file

@ -0,0 +1,24 @@
// Minio Cloud Storage, (C) 2016 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package sha256
func cpuid(op uint32) (eax, ebx, ecx, edx uint32)
func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func xgetbv(index uint32) (eax, edx uint32)
func haveArmSha() bool {
return false
}

vendor/github.com/minio/sha256-simd/cpuid_386.s generated vendored Normal file

@ -0,0 +1,53 @@
// The MIT License (MIT)
//
// Copyright (c) 2015 Klaus Post
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// +build 386,!gccgo
// func cpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·cpuid(SB), 7, $0
XORL CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+4(FP)
MOVL BX, ebx+8(FP)
MOVL CX, ecx+12(FP)
MOVL DX, edx+16(FP)
RET
// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·cpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func xgetbv(index uint32) (eax, edx uint32)
TEXT ·xgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+4(FP)
MOVL DX, edx+8(FP)
RET

vendor/github.com/minio/sha256-simd/cpuid_amd64.go generated vendored Normal file

@ -0,0 +1,24 @@
// Minio Cloud Storage, (C) 2016 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package sha256
func cpuid(op uint32) (eax, ebx, ecx, edx uint32)
func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func xgetbv(index uint32) (eax, edx uint32)
func haveArmSha() bool {
return false
}

vendor/github.com/minio/sha256-simd/cpuid_amd64.s generated vendored Normal file

@ -0,0 +1,53 @@
// The MIT License (MIT)
//
// Copyright (c) 2015 Klaus Post
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// +build amd64,!gccgo
// func cpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·cpuid(SB), 7, $0
XORQ CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·cpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func xgetbv(index uint32) (eax, edx uint32)
TEXT ·xgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+8(FP)
MOVL DX, edx+12(FP)
RET

vendor/github.com/minio/sha256-simd/cpuid_arm.go generated vendored Normal file

@ -0,0 +1,32 @@
// Minio Cloud Storage, (C) 2016 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package sha256
func cpuid(op uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
func xgetbv(index uint32) (eax, edx uint32) {
return 0, 0
}
func haveArmSha() bool {
return false
}

View File

@ -0,0 +1,49 @@
// +build arm64,linux
// Minio Cloud Storage, (C) 2016 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package sha256
import (
"bytes"
"io/ioutil"
)
func cpuid(op uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
func xgetbv(index uint32) (eax, edx uint32) {
return 0, 0
}
// File to check for cpu capabilities.
const procCPUInfo = "/proc/cpuinfo"
// Feature to check for.
const sha256Feature = "sha2"
func haveArmSha() bool {
cpuInfo, err := ioutil.ReadFile(procCPUInfo)
if err != nil {
return false
}
return bytes.Contains(cpuInfo, []byte(sha256Feature))
}

34
vendor/github.com/minio/sha256-simd/cpuid_other.go generated vendored Normal file
View File

@ -0,0 +1,34 @@
// Minio Cloud Storage, (C) 2016 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !386,!amd64,!arm,!arm64 arm64,!linux
package sha256
func cpuid(op uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
func xgetbv(index uint32) (eax, edx uint32) {
return 0, 0
}
func haveArmSha() bool {
return false
}

3
vendor/github.com/minio/sha256-simd/go.mod generated vendored Normal file
View File

@ -0,0 +1,3 @@
module github.com/minio/sha256-simd
go 1.12

409
vendor/github.com/minio/sha256-simd/sha256.go generated vendored Normal file
View File

@ -0,0 +1,409 @@
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha256
import (
"crypto/sha256"
"encoding/binary"
"hash"
"runtime"
)
// Size - The size of a SHA256 checksum in bytes.
const Size = 32
// BlockSize - The blocksize of SHA256 in bytes.
const BlockSize = 64
const (
chunk = BlockSize
init0 = 0x6A09E667
init1 = 0xBB67AE85
init2 = 0x3C6EF372
init3 = 0xA54FF53A
init4 = 0x510E527F
init5 = 0x9B05688C
init6 = 0x1F83D9AB
init7 = 0x5BE0CD19
)
// digest represents the partial evaluation of a checksum.
type digest struct {
h [8]uint32
x [chunk]byte
nx int
len uint64
}
// Reset digest back to default
func (d *digest) Reset() {
d.h[0] = init0
d.h[1] = init1
d.h[2] = init2
d.h[3] = init3
d.h[4] = init4
d.h[5] = init5
d.h[6] = init6
d.h[7] = init7
d.nx = 0
d.len = 0
}
type blockfuncType int
const (
blockfuncGeneric blockfuncType = iota
blockfuncAvx512 blockfuncType = iota
blockfuncAvx2 blockfuncType = iota
blockfuncAvx blockfuncType = iota
blockfuncSsse blockfuncType = iota
blockfuncSha blockfuncType = iota
blockfuncArm blockfuncType = iota
)
var blockfunc blockfuncType
func init() {
is386bit := runtime.GOARCH == "386"
isARM := runtime.GOARCH == "arm"
switch {
case is386bit || isARM:
blockfunc = blockfuncGeneric
case sha && ssse3 && sse41:
blockfunc = blockfuncSha
case avx2:
blockfunc = blockfuncAvx2
case avx:
blockfunc = blockfuncAvx
case ssse3:
blockfunc = blockfuncSsse
case armSha:
blockfunc = blockfuncArm
default:
blockfunc = blockfuncGeneric
}
}
// New returns a new hash.Hash computing the SHA256 checksum.
func New() hash.Hash {
if blockfunc != blockfuncGeneric {
d := new(digest)
d.Reset()
return d
}
// Fallback to the standard golang implementation
// if no features were found.
return sha256.New()
}
// Sum256 - single caller sha256 helper
func Sum256(data []byte) (result [Size]byte) {
var d digest
d.Reset()
d.Write(data)
result = d.checkSum()
return
}
// Return size of checksum
func (d *digest) Size() int { return Size }
// Return blocksize of checksum
func (d *digest) BlockSize() int { return BlockSize }
// Write to digest
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
n := copy(d.x[d.nx:], p)
d.nx += n
if d.nx == chunk {
block(d, d.x[:])
d.nx = 0
}
p = p[n:]
}
if len(p) >= chunk {
n := len(p) &^ (chunk - 1)
block(d, p[:n])
p = p[n:]
}
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}
// Return sha256 sum in bytes
func (d *digest) Sum(in []byte) []byte {
// Make a copy of d0 so that caller can keep writing and summing.
d0 := *d
hash := d0.checkSum()
return append(in, hash[:]...)
}
// Intermediate checksum function
func (d *digest) checkSum() (digest [Size]byte) {
n := d.nx
var k [64]byte
copy(k[:], d.x[:n])
k[n] = 0x80
if n >= 56 {
block(d, k[:])
// clear block buffer - go compiles this to optimal 1x xorps + 4x movups
// unfortunately expressing this more succinctly results in much worse code
k[0] = 0
k[1] = 0
k[2] = 0
k[3] = 0
k[4] = 0
k[5] = 0
k[6] = 0
k[7] = 0
k[8] = 0
k[9] = 0
k[10] = 0
k[11] = 0
k[12] = 0
k[13] = 0
k[14] = 0
k[15] = 0
k[16] = 0
k[17] = 0
k[18] = 0
k[19] = 0
k[20] = 0
k[21] = 0
k[22] = 0
k[23] = 0
k[24] = 0
k[25] = 0
k[26] = 0
k[27] = 0
k[28] = 0
k[29] = 0
k[30] = 0
k[31] = 0
k[32] = 0
k[33] = 0
k[34] = 0
k[35] = 0
k[36] = 0
k[37] = 0
k[38] = 0
k[39] = 0
k[40] = 0
k[41] = 0
k[42] = 0
k[43] = 0
k[44] = 0
k[45] = 0
k[46] = 0
k[47] = 0
k[48] = 0
k[49] = 0
k[50] = 0
k[51] = 0
k[52] = 0
k[53] = 0
k[54] = 0
k[55] = 0
k[56] = 0
k[57] = 0
k[58] = 0
k[59] = 0
k[60] = 0
k[61] = 0
k[62] = 0
k[63] = 0
}
binary.BigEndian.PutUint64(k[56:64], uint64(d.len)<<3)
block(d, k[:])
{
const i = 0
binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
}
{
const i = 1
binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
}
{
const i = 2
binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
}
{
const i = 3
binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
}
{
const i = 4
binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
}
{
const i = 5
binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
}
{
const i = 6
binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
}
{
const i = 7
binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i])
}
return
}
func block(dig *digest, p []byte) {
if blockfunc == blockfuncSha {
blockShaGo(dig, p)
} else if blockfunc == blockfuncAvx2 {
blockAvx2Go(dig, p)
} else if blockfunc == blockfuncAvx {
blockAvxGo(dig, p)
} else if blockfunc == blockfuncSsse {
blockSsseGo(dig, p)
} else if blockfunc == blockfuncArm {
blockArmGo(dig, p)
} else if blockfunc == blockfuncGeneric {
blockGeneric(dig, p)
}
}
func blockGeneric(dig *digest, p []byte) {
var w [64]uint32
h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
for len(p) >= chunk {
// Can interlace the computation of w with the
// rounds below if needed for speed.
for i := 0; i < 16; i++ {
j := i * 4
w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
}
for i := 16; i < 64; i++ {
v1 := w[i-2]
t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10)
v2 := w[i-15]
t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3)
w[i] = t1 + w[i-7] + t2 + w[i-16]
}
a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7
for i := 0; i < 64; i++ {
t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i]
t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c))
h = g
g = f
f = e
e = d + t1
d = c
c = b
b = a
a = t1 + t2
}
h0 += a
h1 += b
h2 += c
h3 += d
h4 += e
h5 += f
h6 += g
h7 += h
p = p[chunk:]
}
dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7
}
var _K = []uint32{
0x428a2f98,
0x71374491,
0xb5c0fbcf,
0xe9b5dba5,
0x3956c25b,
0x59f111f1,
0x923f82a4,
0xab1c5ed5,
0xd807aa98,
0x12835b01,
0x243185be,
0x550c7dc3,
0x72be5d74,
0x80deb1fe,
0x9bdc06a7,
0xc19bf174,
0xe49b69c1,
0xefbe4786,
0x0fc19dc6,
0x240ca1cc,
0x2de92c6f,
0x4a7484aa,
0x5cb0a9dc,
0x76f988da,
0x983e5152,
0xa831c66d,
0xb00327c8,
0xbf597fc7,
0xc6e00bf3,
0xd5a79147,
0x06ca6351,
0x14292967,
0x27b70a85,
0x2e1b2138,
0x4d2c6dfc,
0x53380d13,
0x650a7354,
0x766a0abb,
0x81c2c92e,
0x92722c85,
0xa2bfe8a1,
0xa81a664b,
0xc24b8b70,
0xc76c51a3,
0xd192e819,
0xd6990624,
0xf40e3585,
0x106aa070,
0x19a4c116,
0x1e376c08,
0x2748774c,
0x34b0bcb5,
0x391c0cb3,
0x4ed8aa4a,
0x5b9cca4f,
0x682e6ff3,
0x748f82ee,
0x78a5636f,
0x84c87814,
0x8cc70208,
0x90befffa,
0xa4506ceb,
0xbef9a3f7,
0xc67178f2,
}
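For reference, the package above is intended as a drop-in replacement for crypto/sha256: New() returns a standard hash.Hash and Sum256 mirrors the stdlib helper, with init() selecting the fastest available block function and falling back to the pure-Go path otherwise. A minimal usage sketch, assuming the import path from the go.mod shown earlier and an illustrative input string:
package main

import (
	"encoding/hex"
	"fmt"

	sha256 "github.com/minio/sha256-simd"
)

func main() {
	// One-shot helper defined in sha256.go above.
	sum := sha256.Sum256([]byte("hello world"))
	fmt.Println(hex.EncodeToString(sum[:]))

	// Streaming use via the standard hash.Hash interface; both calls
	// print the same digest.
	h := sha256.New()
	h.Write([]byte("hello "))
	h.Write([]byte("world"))
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}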

View File

@ -0,0 +1,22 @@
//+build !noasm,!appengine
/*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha256
//go:noescape
func blockAvx2(h []uint32, message []uint8)

File diff suppressed because it is too large

View File

@ -0,0 +1,686 @@
// 16x Parallel implementation of SHA256 for AVX512
//
// Minio Cloud Storage, (C) 2017 Minio, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This code is based on the Intel Multi-Buffer Crypto for IPSec library
// and more specifically the following implementation:
// https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm
//
// For Golang it has been converted into Plan 9 assembly with the help of
// github.com/minio/asm2plan9s to assemble the AVX512 instructions
//
// Copyright (c) 2017, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define SHA256_DIGEST_ROW_SIZE 64
// arg1
#define STATE rdi
#define STATE_P9 DI
// arg2
#define INP_SIZE rsi
#define INP_SIZE_P9 SI
#define IDX rcx
#define TBL rdx
#define TBL_P9 DX
#define INPUT rax
#define INPUT_P9 AX
#define inp0 r9
#define SCRATCH_P9 R12
#define SCRATCH r12
#define maskp r13
#define MASKP_P9 R13
#define mask r14
#define MASK_P9 R14
#define A zmm0
#define B zmm1
#define C zmm2
#define D zmm3
#define E zmm4
#define F zmm5
#define G zmm6
#define H zmm7
#define T1 zmm8
#define TMP0 zmm9
#define TMP1 zmm10
#define TMP2 zmm11
#define TMP3 zmm12
#define TMP4 zmm13
#define TMP5 zmm14
#define TMP6 zmm15
#define W0 zmm16
#define W1 zmm17
#define W2 zmm18
#define W3 zmm19
#define W4 zmm20
#define W5 zmm21
#define W6 zmm22
#define W7 zmm23
#define W8 zmm24
#define W9 zmm25
#define W10 zmm26
#define W11 zmm27
#define W12 zmm28
#define W13 zmm29
#define W14 zmm30
#define W15 zmm31
#define TRANSPOSE16(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _r10, _r11, _r12, _r13, _r14, _r15, _t0, _t1) \
\
\ // input r0 = {a15 a14 a13 a12 a11 a10 a9 a8 a7 a6 a5 a4 a3 a2 a1 a0}
\ // r1 = {b15 b14 b13 b12 b11 b10 b9 b8 b7 b6 b5 b4 b3 b2 b1 b0}
\ // r2 = {c15 c14 c13 c12 c11 c10 c9 c8 c7 c6 c5 c4 c3 c2 c1 c0}
\ // r3 = {d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0}
\ // r4 = {e15 e14 e13 e12 e11 e10 e9 e8 e7 e6 e5 e4 e3 e2 e1 e0}
\ // r5 = {f15 f14 f13 f12 f11 f10 f9 f8 f7 f6 f5 f4 f3 f2 f1 f0}
\ // r6 = {g15 g14 g13 g12 g11 g10 g9 g8 g7 g6 g5 g4 g3 g2 g1 g0}
\ // r7 = {h15 h14 h13 h12 h11 h10 h9 h8 h7 h6 h5 h4 h3 h2 h1 h0}
\ // r8 = {i15 i14 i13 i12 i11 i10 i9 i8 i7 i6 i5 i4 i3 i2 i1 i0}
\ // r9 = {j15 j14 j13 j12 j11 j10 j9 j8 j7 j6 j5 j4 j3 j2 j1 j0}
\ // r10 = {k15 k14 k13 k12 k11 k10 k9 k8 k7 k6 k5 k4 k3 k2 k1 k0}
\ // r11 = {l15 l14 l13 l12 l11 l10 l9 l8 l7 l6 l5 l4 l3 l2 l1 l0}
\ // r12 = {m15 m14 m13 m12 m11 m10 m9 m8 m7 m6 m5 m4 m3 m2 m1 m0}
\ // r13 = {n15 n14 n13 n12 n11 n10 n9 n8 n7 n6 n5 n4 n3 n2 n1 n0}
\ // r14 = {o15 o14 o13 o12 o11 o10 o9 o8 o7 o6 o5 o4 o3 o2 o1 o0}
\ // r15 = {p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0}
\
\ // output r0 = { p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0}
\ // r1 = { p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1}
\ // r2 = { p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
\ // r3 = { p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3}
\ // r4 = { p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4}
\ // r5 = { p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5}
\ // r6 = { p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
\ // r7 = { p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7}
\ // r8 = { p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8}
\ // r9 = { p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9}
\ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10}
\ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11}
\ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12}
\ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13}
\ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14}
\ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15}
\
\ // process top half
vshufps _t0, _r0, _r1, 0x44 \ // t0 = {b13 b12 a13 a12 b9 b8 a9 a8 b5 b4 a5 a4 b1 b0 a1 a0}
vshufps _r0, _r0, _r1, 0xEE \ // r0 = {b15 b14 a15 a14 b11 b10 a11 a10 b7 b6 a7 a6 b3 b2 a3 a2}
vshufps _t1, _r2, _r3, 0x44 \ // t1 = {d13 d12 c13 c12 d9 d8 c9 c8 d5 d4 c5 c4 d1 d0 c1 c0}
vshufps _r2, _r2, _r3, 0xEE \ // r2 = {d15 d14 c15 c14 d11 d10 c11 c10 d7 d6 c7 c6 d3 d2 c3 c2}
\
vshufps _r3, _t0, _t1, 0xDD \ // r3 = {d13 c13 b13 a13 d9 c9 b9 a9 d5 c5 b5 a5 d1 c1 b1 a1}
vshufps _r1, _r0, _r2, 0x88 \ // r1 = {d14 c14 b14 a14 d10 c10 b10 a10 d6 c6 b6 a6 d2 c2 b2 a2}
vshufps _r0, _r0, _r2, 0xDD \ // r0 = {d15 c15 b15 a15 d11 c11 b11 a11 d7 c7 b7 a7 d3 c3 b3 a3}
vshufps _t0, _t0, _t1, 0x88 \ // t0 = {d12 c12 b12 a12 d8 c8 b8 a8 d4 c4 b4 a4 d0 c0 b0 a0}
\
\ // use r2 in place of t0
vshufps _r2, _r4, _r5, 0x44 \ // r2 = {f13 f12 e13 e12 f9 f8 e9 e8 f5 f4 e5 e4 f1 f0 e1 e0}
vshufps _r4, _r4, _r5, 0xEE \ // r4 = {f15 f14 e15 e14 f11 f10 e11 e10 f7 f6 e7 e6 f3 f2 e3 e2}
vshufps _t1, _r6, _r7, 0x44 \ // t1 = {h13 h12 g13 g12 h9 h8 g9 g8 h5 h4 g5 g4 h1 h0 g1 g0}
vshufps _r6, _r6, _r7, 0xEE \ // r6 = {h15 h14 g15 g14 h11 h10 g11 g10 h7 h6 g7 g6 h3 h2 g3 g2}
\
vshufps _r7, _r2, _t1, 0xDD \ // r7 = {h13 g13 f13 e13 h9 g9 f9 e9 h5 g5 f5 e5 h1 g1 f1 e1}
vshufps _r5, _r4, _r6, 0x88 \ // r5 = {h14 g14 f14 e14 h10 g10 f10 e10 h6 g6 f6 e6 h2 g2 f2 e2}
vshufps _r4, _r4, _r6, 0xDD \ // r4 = {h15 g15 f15 e15 h11 g11 f11 e11 h7 g7 f7 e7 h3 g3 f3 e3}
vshufps _r2, _r2, _t1, 0x88 \ // r2 = {h12 g12 f12 e12 h8 g8 f8 e8 h4 g4 f4 e4 h0 g0 f0 e0}
\
\ // use r6 in place of t0
vshufps _r6, _r8, _r9, 0x44 \ // r6 = {j13 j12 i13 i12 j9 j8 i9 i8 j5 j4 i5 i4 j1 j0 i1 i0}
vshufps _r8, _r8, _r9, 0xEE \ // r8 = {j15 j14 i15 i14 j11 j10 i11 i10 j7 j6 i7 i6 j3 j2 i3 i2}
vshufps _t1, _r10, _r11, 0x44 \ // t1 = {l13 l12 k13 k12 l9 l8 k9 k8 l5 l4 k5 k4 l1 l0 k1 k0}
vshufps _r10, _r10, _r11, 0xEE \ // r10 = {l15 l14 k15 k14 l11 l10 k11 k10 l7 l6 k7 k6 l3 l2 k3 k2}
\
vshufps _r11, _r6, _t1, 0xDD \ // r11 = {l13 k13 j13 i13 l9 k9 j9 i9 l5 k5 j5 i5 l1 k1 j1 i1}
vshufps _r9, _r8, _r10, 0x88 \ // r9 = {l14 k14 j14 i14 l10 k10 j10 i10 l6 k6 j6 i6 l2 k2 j2 i2}
vshufps _r8, _r8, _r10, 0xDD \ // r8 = {l15 k15 j15 i15 l11 k11 j11 i11 l7 k7 j7 i7 l3 k3 j3 i3}
vshufps _r6, _r6, _t1, 0x88 \ // r6 = {l12 k12 j12 i12 l8 k8 j8 i8 l4 k4 j4 i4 l0 k0 j0 i0}
\
\ // use r10 in place of t0
vshufps _r10, _r12, _r13, 0x44 \ // r10 = {n13 n12 m13 m12 n9 n8 m9 m8 n5 n4 m5 m4 n1 n0 m1 m0}
vshufps _r12, _r12, _r13, 0xEE \ // r12 = {n15 n14 m15 m14 n11 n10 m11 m10 n7 n6 m7 m6 n3 n2 m3 m2}
vshufps _t1, _r14, _r15, 0x44 \ // t1 = {p13 p12 o13 o12 p9 p8 o9 o8 p5 p4 o5 o4 p1 p0 o1 o0}
vshufps _r14, _r14, _r15, 0xEE \ // r14 = {p15 p14 o15 o14 p11 p10 o11 o10 p7 p6 o7 o6 p3 p2 o3 o2}
\
vshufps _r15, _r10, _t1, 0xDD \ // r15 = {p13 o13 n13 m13 p9 o9 n9 m9 p5 o5 n5 m5 p1 o1 n1 m1}
vshufps _r13, _r12, _r14, 0x88 \ // r13 = {p14 o14 n14 m14 p10 o10 n10 m10 p6 o6 n6 m6 p2 o2 n2 m2}
vshufps _r12, _r12, _r14, 0xDD \ // r12 = {p15 o15 n15 m15 p11 o11 n11 m11 p7 o7 n7 m7 p3 o3 n3 m3}
vshufps _r10, _r10, _t1, 0x88 \ // r10 = {p12 o12 n12 m12 p8 o8 n8 m8 p4 o4 n4 m4 p0 o0 n0 m0}
\
\ // At this point, the registers that contain interesting data are:
\ // t0, r3, r1, r0, r2, r7, r5, r4, r6, r11, r9, r8, r10, r15, r13, r12
\ // Can use t1 and r14 as scratch registers
LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX \
LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 \
\
vmovdqu32 _r14, [rbx] \
vpermi2q _r14, _t0, _r2 \ // r14 = {h8 g8 f8 e8 d8 c8 b8 a8 h0 g0 f0 e0 d0 c0 b0 a0}
vmovdqu32 _t1, [r8] \
vpermi2q _t1, _t0, _r2 \ // t1 = {h12 g12 f12 e12 d12 c12 b12 a12 h4 g4 f4 e4 d4 c4 b4 a4}
\
vmovdqu32 _r2, [rbx] \
vpermi2q _r2, _r3, _r7 \ // r2 = {h9 g9 f9 e9 d9 c9 b9 a9 h1 g1 f1 e1 d1 c1 b1 a1}
vmovdqu32 _t0, [r8] \
vpermi2q _t0, _r3, _r7 \ // t0 = {h13 g13 f13 e13 d13 c13 b13 a13 h5 g5 f5 e5 d5 c5 b5 a5}
\
vmovdqu32 _r3, [rbx] \
vpermi2q _r3, _r1, _r5 \ // r3 = {h10 g10 f10 e10 d10 c10 b10 a10 h2 g2 f2 e2 d2 c2 b2 a2}
vmovdqu32 _r7, [r8] \
vpermi2q _r7, _r1, _r5 \ // r7 = {h14 g14 f14 e14 d14 c14 b14 a14 h6 g6 f6 e6 d6 c6 b6 a6}
\
vmovdqu32 _r1, [rbx] \
vpermi2q _r1, _r0, _r4 \ // r1 = {h11 g11 f11 e11 d11 c11 b11 a11 h3 g3 f3 e3 d3 c3 b3 a3}
vmovdqu32 _r5, [r8] \
vpermi2q _r5, _r0, _r4 \ // r5 = {h15 g15 f15 e15 d15 c15 b15 a15 h7 g7 f7 e7 d7 c7 b7 a7}
\
vmovdqu32 _r0, [rbx] \
vpermi2q _r0, _r6, _r10 \ // r0 = {p8 o8 n8 m8 l8 k8 j8 i8 p0 o0 n0 m0 l0 k0 j0 i0}
vmovdqu32 _r4, [r8] \
vpermi2q _r4, _r6, _r10 \ // r4 = {p12 o12 n12 m12 l12 k12 j12 i12 p4 o4 n4 m4 l4 k4 j4 i4}
\
vmovdqu32 _r6, [rbx] \
vpermi2q _r6, _r11, _r15 \ // r6 = {p9 o9 n9 m9 l9 k9 j9 i9 p1 o1 n1 m1 l1 k1 j1 i1}
vmovdqu32 _r10, [r8] \
vpermi2q _r10, _r11, _r15 \ // r10 = {p13 o13 n13 m13 l13 k13 j13 i13 p5 o5 n5 m5 l5 k5 j5 i5}
\
vmovdqu32 _r11, [rbx] \
vpermi2q _r11, _r9, _r13 \ // r11 = {p10 o10 n10 m10 l10 k10 j10 i10 p2 o2 n2 m2 l2 k2 j2 i2}
vmovdqu32 _r15, [r8] \
vpermi2q _r15, _r9, _r13 \ // r15 = {p14 o14 n14 m14 l14 k14 j14 i14 p6 o6 n6 m6 l6 k6 j6 i6}
\
vmovdqu32 _r9, [rbx] \
vpermi2q _r9, _r8, _r12 \ // r9 = {p11 o11 n11 m11 l11 k11 j11 i11 p3 o3 n3 m3 l3 k3 j3 i3}
vmovdqu32 _r13, [r8] \
vpermi2q _r13, _r8, _r12 \ // r13 = {p15 o15 n15 m15 l15 k15 j15 i15 p7 o7 n7 m7 l7 k7 j7 i7}
\
\ // At this point r8 and r12 can be used as scratch registers
vshuff64x2 _r8, _r14, _r0, 0xEE \ // r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8}
vshuff64x2 _r0, _r14, _r0, 0x44 \ // r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0}
\
vshuff64x2 _r12, _t1, _r4, 0xEE \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12}
vshuff64x2 _r4, _t1, _r4, 0x44 \ // r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4}
\
vshuff64x2 _r14, _r7, _r15, 0xEE \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14}
vshuff64x2 _t1, _r7, _r15, 0x44 \ // t1 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
\
vshuff64x2 _r15, _r5, _r13, 0xEE \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15}
vshuff64x2 _r7, _r5, _r13, 0x44 \ // r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7}
\
vshuff64x2 _r13, _t0, _r10, 0xEE \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13}
vshuff64x2 _r5, _t0, _r10, 0x44 \ // r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5}
\
vshuff64x2 _r10, _r3, _r11, 0xEE \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10}
vshuff64x2 _t0, _r3, _r11, 0x44 \ // t0 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
\
vshuff64x2 _r11, _r1, _r9, 0xEE \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11}
vshuff64x2 _r3, _r1, _r9, 0x44 \ // r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3}
\
vshuff64x2 _r9, _r2, _r6, 0xEE \ // r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9}
vshuff64x2 _r1, _r2, _r6, 0x44 \ // r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1}
\
vmovdqu32 _r2, _t0 \ // r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2}
vmovdqu32 _r6, _t1 \ // r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6}
// CH(A, B, C) = (A&B) ^ (~A&C)
// MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G)
// SIGMA0 = ROR_2 ^ ROR_13 ^ ROR_22
// SIGMA1 = ROR_6 ^ ROR_11 ^ ROR_25
// sigma0 = ROR_7 ^ ROR_18 ^ SHR_3
// sigma1 = ROR_17 ^ ROR_19 ^ SHR_10
// Main processing loop per round
#define PROCESS_LOOP(_WT, _ROUND, _A, _B, _C, _D, _E, _F, _G, _H) \
\ // T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt
\ // T2 = SIGMA0(A) + MAJ(A, B, C)
\ // H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2
\
\ // H becomes T2, then add T1 for A
\ // D becomes D + T1 for E
\
vpaddd T1, _H, TMP3 \ // T1 = H + Kt
vmovdqu32 TMP0, _E \
vprord TMP1, _E, 6 \ // ROR_6(E)
vprord TMP2, _E, 11 \ // ROR_11(E)
vprord TMP3, _E, 25 \ // ROR_25(E)
vpternlogd TMP0, _F, _G, 0xCA \ // TMP0 = CH(E,F,G)
vpaddd T1, T1, _WT \ // T1 = T1 + Wt
vpternlogd TMP1, TMP2, TMP3, 0x96 \ // TMP1 = SIGMA1(E)
vpaddd T1, T1, TMP0 \ // T1 = T1 + CH(E,F,G)
vpaddd T1, T1, TMP1 \ // T1 = T1 + SIGMA1(E)
vpaddd _D, _D, T1 \ // D = D + T1
\
vprord _H, _A, 2 \ // ROR_2(A)
vprord TMP2, _A, 13 \ // ROR_13(A)
vprord TMP3, _A, 22 \ // ROR_22(A)
vmovdqu32 TMP0, _A \
vpternlogd TMP0, _B, _C, 0xE8 \ // TMP0 = MAJ(A,B,C)
vpternlogd _H, TMP2, TMP3, 0x96 \ // H(T2) = SIGMA0(A)
vpaddd _H, _H, TMP0 \ // H(T2) = SIGMA0(A) + MAJ(A,B,C)
vpaddd _H, _H, T1 \ // H(A) = H(T2) + T1
\
vmovdqu32 TMP3, [TBL + ((_ROUND+1)*64)] \ // Next Kt
#define MSG_SCHED_ROUND_16_63(_WT, _WTp1, _WTp9, _WTp14) \
vprord TMP4, _WTp14, 17 \ // ROR_17(Wt-2)
vprord TMP5, _WTp14, 19 \ // ROR_19(Wt-2)
vpsrld TMP6, _WTp14, 10 \ // SHR_10(Wt-2)
vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma1(Wt-2)
\
vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2)
vpaddd _WT, _WT, _WTp9 \ // Wt = Wt-16 + sigma1(Wt-2) + Wt-7
\
vprord TMP4, _WTp1, 7 \ // ROR_7(Wt-15)
vprord TMP5, _WTp1, 18 \ // ROR_18(Wt-15)
vpsrld TMP6, _WTp1, 3 \ // SHR_3(Wt-15)
vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma0(Wt-15)
\
vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) +
\ // Wt-7 + sigma0(Wt-15)
// Note this is reading in a block of data for one lane
// When all 16 are read, the data must be transposed to build msg schedule
#define MSG_SCHED_ROUND_00_15(_WT, OFFSET, LABEL) \
TESTQ $(1<<OFFSET), MASK_P9 \
JE LABEL \
MOVQ OFFSET*24(INPUT_P9), R9 \
vmovups _WT, [inp0+IDX] \
LABEL: \
#define MASKED_LOAD(_WT, OFFSET, LABEL) \
TESTQ $(1<<OFFSET), MASK_P9 \
JE LABEL \
MOVQ OFFSET*24(INPUT_P9), R9 \
vmovups _WT,[inp0+IDX] \
LABEL: \
TEXT ·sha256_x16_avx512(SB), 7, $0
MOVQ digests+0(FP), STATE_P9 //
MOVQ scratch+8(FP), SCRATCH_P9
MOVQ mask_len+32(FP), INP_SIZE_P9 // number of blocks to process
MOVQ mask+24(FP), MASKP_P9
MOVQ (MASKP_P9), MASK_P9
kmovq k1, mask
LEAQ inputs+48(FP), INPUT_P9
// Initialize digests
vmovdqu32 A, [STATE + 0*SHA256_DIGEST_ROW_SIZE]
vmovdqu32 B, [STATE + 1*SHA256_DIGEST_ROW_SIZE]
vmovdqu32 C, [STATE + 2*SHA256_DIGEST_ROW_SIZE]
vmovdqu32 D, [STATE + 3*SHA256_DIGEST_ROW_SIZE]
vmovdqu32 E, [STATE + 4*SHA256_DIGEST_ROW_SIZE]
vmovdqu32 F, [STATE + 5*SHA256_DIGEST_ROW_SIZE]
vmovdqu32 G, [STATE + 6*SHA256_DIGEST_ROW_SIZE]
vmovdqu32 H, [STATE + 7*SHA256_DIGEST_ROW_SIZE]
MOVQ table+16(FP), TBL_P9
xor IDX, IDX
// Read in first block of input data
MASKED_LOAD( W0, 0, skipInput0)
MASKED_LOAD( W1, 1, skipInput1)
MASKED_LOAD( W2, 2, skipInput2)
MASKED_LOAD( W3, 3, skipInput3)
MASKED_LOAD( W4, 4, skipInput4)
MASKED_LOAD( W5, 5, skipInput5)
MASKED_LOAD( W6, 6, skipInput6)
MASKED_LOAD( W7, 7, skipInput7)
MASKED_LOAD( W8, 8, skipInput8)
MASKED_LOAD( W9, 9, skipInput9)
MASKED_LOAD(W10, 10, skipInput10)
MASKED_LOAD(W11, 11, skipInput11)
MASKED_LOAD(W12, 12, skipInput12)
MASKED_LOAD(W13, 13, skipInput13)
MASKED_LOAD(W14, 14, skipInput14)
MASKED_LOAD(W15, 15, skipInput15)
lloop:
LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), TBL_P9
vmovdqu32 TMP2, [TBL]
// Get first K from table
MOVQ table+16(FP), TBL_P9
vmovdqu32 TMP3, [TBL]
// Save digests for later addition
vmovdqu32 [SCRATCH + 64*0], A
vmovdqu32 [SCRATCH + 64*1], B
vmovdqu32 [SCRATCH + 64*2], C
vmovdqu32 [SCRATCH + 64*3], D
vmovdqu32 [SCRATCH + 64*4], E
vmovdqu32 [SCRATCH + 64*5], F
vmovdqu32 [SCRATCH + 64*6], G
vmovdqu32 [SCRATCH + 64*7], H
add IDX, 64
// Transpose input data
TRANSPOSE16(W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1)
vpshufb W0, W0, TMP2
vpshufb W1, W1, TMP2
vpshufb W2, W2, TMP2
vpshufb W3, W3, TMP2
vpshufb W4, W4, TMP2
vpshufb W5, W5, TMP2
vpshufb W6, W6, TMP2
vpshufb W7, W7, TMP2
vpshufb W8, W8, TMP2
vpshufb W9, W9, TMP2
vpshufb W10, W10, TMP2
vpshufb W11, W11, TMP2
vpshufb W12, W12, TMP2
vpshufb W13, W13, TMP2
vpshufb W14, W14, TMP2
vpshufb W15, W15, TMP2
// MSG Schedule for W0-W15 is now complete in registers
// Process first 48 rounds
// Calculate next Wt+16 after processing is complete and Wt is unneeded
PROCESS_LOOP( W0, 0, A, B, C, D, E, F, G, H)
MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
PROCESS_LOOP( W1, 1, H, A, B, C, D, E, F, G)
MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
PROCESS_LOOP( W2, 2, G, H, A, B, C, D, E, F)
MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
PROCESS_LOOP( W3, 3, F, G, H, A, B, C, D, E)
MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
PROCESS_LOOP( W4, 4, E, F, G, H, A, B, C, D)
MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
PROCESS_LOOP( W5, 5, D, E, F, G, H, A, B, C)
MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
PROCESS_LOOP( W6, 6, C, D, E, F, G, H, A, B)
MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
PROCESS_LOOP( W7, 7, B, C, D, E, F, G, H, A)
MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
PROCESS_LOOP( W8, 8, A, B, C, D, E, F, G, H)
MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
PROCESS_LOOP( W9, 9, H, A, B, C, D, E, F, G)
MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
PROCESS_LOOP(W10, 10, G, H, A, B, C, D, E, F)
MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
PROCESS_LOOP(W11, 11, F, G, H, A, B, C, D, E)
MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
PROCESS_LOOP(W12, 12, E, F, G, H, A, B, C, D)
MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
PROCESS_LOOP(W13, 13, D, E, F, G, H, A, B, C)
MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
PROCESS_LOOP(W14, 14, C, D, E, F, G, H, A, B)
MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
PROCESS_LOOP(W15, 15, B, C, D, E, F, G, H, A)
MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
PROCESS_LOOP( W0, 16, A, B, C, D, E, F, G, H)
MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
PROCESS_LOOP( W1, 17, H, A, B, C, D, E, F, G)
MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
PROCESS_LOOP( W2, 18, G, H, A, B, C, D, E, F)
MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
PROCESS_LOOP( W3, 19, F, G, H, A, B, C, D, E)
MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
PROCESS_LOOP( W4, 20, E, F, G, H, A, B, C, D)
MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
PROCESS_LOOP( W5, 21, D, E, F, G, H, A, B, C)
MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
PROCESS_LOOP( W6, 22, C, D, E, F, G, H, A, B)
MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
PROCESS_LOOP( W7, 23, B, C, D, E, F, G, H, A)
MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
PROCESS_LOOP( W8, 24, A, B, C, D, E, F, G, H)
MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
PROCESS_LOOP( W9, 25, H, A, B, C, D, E, F, G)
MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
PROCESS_LOOP(W10, 26, G, H, A, B, C, D, E, F)
MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
PROCESS_LOOP(W11, 27, F, G, H, A, B, C, D, E)
MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
PROCESS_LOOP(W12, 28, E, F, G, H, A, B, C, D)
MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
PROCESS_LOOP(W13, 29, D, E, F, G, H, A, B, C)
MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
PROCESS_LOOP(W14, 30, C, D, E, F, G, H, A, B)
MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
PROCESS_LOOP(W15, 31, B, C, D, E, F, G, H, A)
MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
PROCESS_LOOP( W0, 32, A, B, C, D, E, F, G, H)
MSG_SCHED_ROUND_16_63( W0, W1, W9, W14)
PROCESS_LOOP( W1, 33, H, A, B, C, D, E, F, G)
MSG_SCHED_ROUND_16_63( W1, W2, W10, W15)
PROCESS_LOOP( W2, 34, G, H, A, B, C, D, E, F)
MSG_SCHED_ROUND_16_63( W2, W3, W11, W0)
PROCESS_LOOP( W3, 35, F, G, H, A, B, C, D, E)
MSG_SCHED_ROUND_16_63( W3, W4, W12, W1)
PROCESS_LOOP( W4, 36, E, F, G, H, A, B, C, D)
MSG_SCHED_ROUND_16_63( W4, W5, W13, W2)
PROCESS_LOOP( W5, 37, D, E, F, G, H, A, B, C)
MSG_SCHED_ROUND_16_63( W5, W6, W14, W3)
PROCESS_LOOP( W6, 38, C, D, E, F, G, H, A, B)
MSG_SCHED_ROUND_16_63( W6, W7, W15, W4)
PROCESS_LOOP( W7, 39, B, C, D, E, F, G, H, A)
MSG_SCHED_ROUND_16_63( W7, W8, W0, W5)
PROCESS_LOOP( W8, 40, A, B, C, D, E, F, G, H)
MSG_SCHED_ROUND_16_63( W8, W9, W1, W6)
PROCESS_LOOP( W9, 41, H, A, B, C, D, E, F, G)
MSG_SCHED_ROUND_16_63( W9, W10, W2, W7)
PROCESS_LOOP(W10, 42, G, H, A, B, C, D, E, F)
MSG_SCHED_ROUND_16_63(W10, W11, W3, W8)
PROCESS_LOOP(W11, 43, F, G, H, A, B, C, D, E)
MSG_SCHED_ROUND_16_63(W11, W12, W4, W9)
PROCESS_LOOP(W12, 44, E, F, G, H, A, B, C, D)
MSG_SCHED_ROUND_16_63(W12, W13, W5, W10)
PROCESS_LOOP(W13, 45, D, E, F, G, H, A, B, C)
MSG_SCHED_ROUND_16_63(W13, W14, W6, W11)
PROCESS_LOOP(W14, 46, C, D, E, F, G, H, A, B)
MSG_SCHED_ROUND_16_63(W14, W15, W7, W12)
PROCESS_LOOP(W15, 47, B, C, D, E, F, G, H, A)
MSG_SCHED_ROUND_16_63(W15, W0, W8, W13)
// Check if this is the last block
sub INP_SIZE, 1
JE lastLoop
// Load next mask for inputs
ADDQ $8, MASKP_P9
MOVQ (MASKP_P9), MASK_P9
// Process last 16 rounds
// Read in next block msg data for use in first 16 words of msg sched
PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H)
MSG_SCHED_ROUND_00_15( W0, 0, skipNext0)
PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G)
MSG_SCHED_ROUND_00_15( W1, 1, skipNext1)
PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F)
MSG_SCHED_ROUND_00_15( W2, 2, skipNext2)
PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E)
MSG_SCHED_ROUND_00_15( W3, 3, skipNext3)
PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D)
MSG_SCHED_ROUND_00_15( W4, 4, skipNext4)
PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C)
MSG_SCHED_ROUND_00_15( W5, 5, skipNext5)
PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B)
MSG_SCHED_ROUND_00_15( W6, 6, skipNext6)
PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A)
MSG_SCHED_ROUND_00_15( W7, 7, skipNext7)
PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H)
MSG_SCHED_ROUND_00_15( W8, 8, skipNext8)
PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G)
MSG_SCHED_ROUND_00_15( W9, 9, skipNext9)
PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F)
MSG_SCHED_ROUND_00_15(W10, 10, skipNext10)
PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E)
MSG_SCHED_ROUND_00_15(W11, 11, skipNext11)
PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D)
MSG_SCHED_ROUND_00_15(W12, 12, skipNext12)
PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C)
MSG_SCHED_ROUND_00_15(W13, 13, skipNext13)
PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B)
MSG_SCHED_ROUND_00_15(W14, 14, skipNext14)
PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A)
MSG_SCHED_ROUND_00_15(W15, 15, skipNext15)
// Add old digest
vmovdqu32 TMP2, A
vmovdqu32 A, [SCRATCH + 64*0]
vpaddd A{k1}, A, TMP2
vmovdqu32 TMP2, B
vmovdqu32 B, [SCRATCH + 64*1]
vpaddd B{k1}, B, TMP2
vmovdqu32 TMP2, C
vmovdqu32 C, [SCRATCH + 64*2]
vpaddd C{k1}, C, TMP2
vmovdqu32 TMP2, D
vmovdqu32 D, [SCRATCH + 64*3]
vpaddd D{k1}, D, TMP2
vmovdqu32 TMP2, E
vmovdqu32 E, [SCRATCH + 64*4]
vpaddd E{k1}, E, TMP2
vmovdqu32 TMP2, F
vmovdqu32 F, [SCRATCH + 64*5]
vpaddd F{k1}, F, TMP2
vmovdqu32 TMP2, G
vmovdqu32 G, [SCRATCH + 64*6]
vpaddd G{k1}, G, TMP2
vmovdqu32 TMP2, H
vmovdqu32 H, [SCRATCH + 64*7]
vpaddd H{k1}, H, TMP2
kmovq k1, mask
JMP lloop
lastLoop:
// Process last 16 rounds
PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H)
PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G)
PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F)
PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E)
PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D)
PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C)
PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B)
PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A)
PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H)
PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G)
PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F)
PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E)
PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D)
PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C)
PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B)
PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A)
// Add old digest
vmovdqu32 TMP2, A
vmovdqu32 A, [SCRATCH + 64*0]
vpaddd A{k1}, A, TMP2
vmovdqu32 TMP2, B
vmovdqu32 B, [SCRATCH + 64*1]
vpaddd B{k1}, B, TMP2
vmovdqu32 TMP2, C
vmovdqu32 C, [SCRATCH + 64*2]
vpaddd C{k1}, C, TMP2
vmovdqu32 TMP2, D
vmovdqu32 D, [SCRATCH + 64*3]
vpaddd D{k1}, D, TMP2
vmovdqu32 TMP2, E
vmovdqu32 E, [SCRATCH + 64*4]
vpaddd E{k1}, E, TMP2
vmovdqu32 TMP2, F
vmovdqu32 F, [SCRATCH + 64*5]
vpaddd F{k1}, F, TMP2
vmovdqu32 TMP2, G
vmovdqu32 G, [SCRATCH + 64*6]
vpaddd G{k1}, G, TMP2
vmovdqu32 TMP2, H
vmovdqu32 H, [SCRATCH + 64*7]
vpaddd H{k1}, H, TMP2
// Write out digest
vmovdqu32 [STATE + 0*SHA256_DIGEST_ROW_SIZE], A
vmovdqu32 [STATE + 1*SHA256_DIGEST_ROW_SIZE], B
vmovdqu32 [STATE + 2*SHA256_DIGEST_ROW_SIZE], C
vmovdqu32 [STATE + 3*SHA256_DIGEST_ROW_SIZE], D
vmovdqu32 [STATE + 4*SHA256_DIGEST_ROW_SIZE], E
vmovdqu32 [STATE + 5*SHA256_DIGEST_ROW_SIZE], F
vmovdqu32 [STATE + 6*SHA256_DIGEST_ROW_SIZE], G
vmovdqu32 [STATE + 7*SHA256_DIGEST_ROW_SIZE], H
VZEROUPPER
RET
//
// Tables
//
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203
DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b
GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C
DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D
GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E
DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F
GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64
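A scalar sketch of the ternary-logic trick used in the PROCESS_LOOP macro above: the VPTERNLOGD immediates 0xCA, 0xE8 and 0x96 are 8-entry truth tables that compute CH(E,F,G), MAJ(A,B,C) and the three-way XOR behind SIGMA0/SIGMA1 in a single instruction each. The Go helper below only illustrates that encoding; the function names and sample values are not part of the vendored code.
package main

import "fmt"

// Scalar reference for the round helpers that PROCESS_LOOP evaluates with
// single VPTERNLOGD instructions.
func ch(e, f, g uint32) uint32  { return (e & f) ^ (^e & g) }
func maj(a, b, c uint32) uint32 { return (a & b) ^ (a & c) ^ (b & c) }

// ternlog mimics VPTERNLOGD bit-wise: imm is a truth table indexed by
// (dst<<2)|(src1<<1)|src2 for each bit position.
func ternlog(dst, src1, src2 uint32, imm uint8) uint32 {
	var out uint32
	for bit := uint(0); bit < 32; bit++ {
		idx := (dst>>bit&1)<<2 | (src1>>bit&1)<<1 | (src2 >> bit & 1)
		out |= uint32(imm>>idx&1) << bit
	}
	return out
}

func main() {
	// Arbitrary sample words (the SHA256 E/F/G initial values from sha256.go).
	e, f, g := uint32(0x510E527F), uint32(0x9B05688C), uint32(0x1F83D9AB)
	fmt.Printf("CH  : %08x == %08x\n", ch(e, f, g), ternlog(e, f, g, 0xCA))
	fmt.Printf("MAJ : %08x == %08x\n", maj(e, f, g), ternlog(e, f, g, 0xE8))
	fmt.Printf("XOR3: %08x == %08x\n", e^f^g, ternlog(e, f, g, 0x96))
}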

View File

@ -0,0 +1,500 @@
//+build !noasm,!appengine
/*
* Minio Cloud Storage, (C) 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sha256
import (
"encoding/binary"
"errors"
"hash"
"sort"
"sync/atomic"
"time"
)
//go:noescape
func sha256X16Avx512(digests *[512]byte, scratch *[512]byte, table *[512]uint64, mask []uint64, inputs [16][]byte)
// Avx512ServerUID - Do not start at 0 but at the next multiple of 16 so as to be able to
// differentiate from the default initialization value of 0
const Avx512ServerUID = 16
var uidCounter uint64
// NewAvx512 - initialize sha256 Avx512 implementation.
func NewAvx512(a512srv *Avx512Server) hash.Hash {
uid := atomic.AddUint64(&uidCounter, 1)
return &Avx512Digest{uid: uid, a512srv: a512srv}
}
// Avx512Digest - Type for computing SHA256 using Avx512
type Avx512Digest struct {
uid uint64
a512srv *Avx512Server
x [chunk]byte
nx int
len uint64
final bool
result [Size]byte
}
// Size - Return size of checksum
func (d *Avx512Digest) Size() int { return Size }
// BlockSize - Return blocksize of checksum
func (d Avx512Digest) BlockSize() int { return BlockSize }
// Reset - reset sha digest to its initial values
func (d *Avx512Digest) Reset() {
d.a512srv.blocksCh <- blockInput{uid: d.uid, reset: true}
d.nx = 0
d.len = 0
d.final = false
}
// Write to digest
func (d *Avx512Digest) Write(p []byte) (nn int, err error) {
if d.final {
return 0, errors.New("Avx512Digest already finalized. Reset first before writing again")
}
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
n := copy(d.x[d.nx:], p)
d.nx += n
if d.nx == chunk {
d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: d.x[:]}
d.nx = 0
}
p = p[n:]
}
if len(p) >= chunk {
n := len(p) &^ (chunk - 1)
d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: p[:n]}
p = p[n:]
}
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}
// Sum - Return sha256 sum in bytes
func (d *Avx512Digest) Sum(in []byte) (result []byte) {
if d.final {
return append(in, d.result[:]...)
}
trail := make([]byte, 0, 128)
trail = append(trail, d.x[:d.nx]...)
len := d.len
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
var tmp [64]byte
tmp[0] = 0x80
if len%64 < 56 {
trail = append(trail, tmp[0:56-len%64]...)
} else {
trail = append(trail, tmp[0:64+56-len%64]...)
}
d.nx = 0
// Length in bits.
len <<= 3
for i := uint(0); i < 8; i++ {
tmp[i] = byte(len >> (56 - 8*i))
}
trail = append(trail, tmp[0:8]...)
sumCh := make(chan [Size]byte)
d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: trail, final: true, sumCh: sumCh}
d.result = <-sumCh
d.final = true
return append(in, d.result[:]...)
}
var table = [512]uint64{
0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98,
0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98,
0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491,
0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491,
0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf,
0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf,
0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5,
0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5,
0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b,
0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b,
0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1,
0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1,
0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4,
0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4,
0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5,
0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5,
0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98,
0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98,
0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01,
0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01,
0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be,
0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be,
0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3,
0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3,
0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74,
0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74,
0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe,
0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe,
0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7,
0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7,
0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174,
0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174,
0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1,
0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1,
0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786,
0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786,
0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6,
0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6,
0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc,
0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc,
0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f,
0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f,
0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa,
0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa,
0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc,
0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc,
0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da,
0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da,
0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152,
0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152,
0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d,
0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d,
0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8,
0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8,
0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7,
0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7,
0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3,
0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3,
0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147,
0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147,
0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351,
0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351,
0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967,
0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967,
0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85,
0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85,
0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138,
0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138,
0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc,
0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc,
0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13,
0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13,
0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354,
0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354,
0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb,
0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb,
0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e,
0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e,
0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85,
0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85,
0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1,
0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1,
0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b,
0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b,
0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70,
0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70,
0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3,
0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3,
0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819,
0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819,
0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624,
0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624,
0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585,
0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585,
0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070,
0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070,
0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116,
0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116,
0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08,
0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08,
0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c,
0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c,
0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5,
0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5,
0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3,
0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3,
0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a,
0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a,
0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f,
0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f,
0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3,
0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3,
0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee,
0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee,
0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f,
0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f,
0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814,
0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814,
0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208,
0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208,
0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa,
0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa,
0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb,
0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb,
0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7,
0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7,
0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2,
0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2}
// Interface function to assembly code
func blockAvx512(digests *[512]byte, input [16][]byte, mask []uint64) [16][Size]byte {
scratch := [512]byte{}
sha256X16Avx512(digests, &scratch, &table, mask, input)
output := [16][Size]byte{}
for i := 0; i < 16; i++ {
output[i] = getDigest(i, digests[:])
}
return output
}
func getDigest(index int, state []byte) (sum [Size]byte) {
for j := 0; j < 16; j += 2 {
for i := index*4 + j*Size; i < index*4+(j+1)*Size; i += Size {
binary.BigEndian.PutUint32(sum[j*2:], binary.LittleEndian.Uint32(state[i:i+4]))
}
}
return
}
// Message to send across input channel
type blockInput struct {
uid uint64
msg []byte
reset bool
final bool
sumCh chan [Size]byte
}
// Avx512Server - Type to implement 16x parallel handling of SHA256 invocations
type Avx512Server struct {
blocksCh chan blockInput // Input channel
totalIn int // Total number of inputs waiting to be processed
lanes [16]Avx512LaneInfo // Array with info per lane (out of 16)
digests map[uint64][Size]byte // Map of uids to (interim) digest results
}
// Avx512LaneInfo - Info for each lane
type Avx512LaneInfo struct {
uid uint64 // unique identification for this SHA processing
block []byte // input block to be processed
outputCh chan [Size]byte // channel for output result
}
// NewAvx512Server - Create new object for parallel processing handling
func NewAvx512Server() *Avx512Server {
a512srv := &Avx512Server{}
a512srv.digests = make(map[uint64][Size]byte)
a512srv.blocksCh = make(chan blockInput)
// Start a single thread for reading from the input channel
go a512srv.Process()
return a512srv
}
// Process - Sole handler for reading from the input channel
func (a512srv *Avx512Server) Process() {
for {
select {
case block := <-a512srv.blocksCh:
if block.reset {
a512srv.reset(block.uid)
continue
}
index := block.uid & 0xf
// fmt.Println("Adding message:", block.uid, index)
if a512srv.lanes[index].block != nil { // If slot is already filled, process all inputs
//fmt.Println("Invoking Blocks()")
a512srv.blocks()
}
a512srv.totalIn++
a512srv.lanes[index] = Avx512LaneInfo{uid: block.uid, block: block.msg}
if block.final {
a512srv.lanes[index].outputCh = block.sumCh
}
if a512srv.totalIn == len(a512srv.lanes) {
// fmt.Println("Invoking Blocks() while FULL: ")
a512srv.blocks()
}
// TODO: test with larger timeout
case <-time.After(1 * time.Microsecond):
for _, lane := range a512srv.lanes {
if lane.block != nil { // check if there is any input to process
// fmt.Println("Invoking Blocks() on TIMEOUT: ")
a512srv.blocks()
break // we are done
}
}
}
}
}
// Do a reset for this calculation
func (a512srv *Avx512Server) reset(uid uint64) {
// Check if there is a message still waiting to be processed (and remove if so)
for i, lane := range a512srv.lanes {
if lane.uid == uid {
if lane.block != nil {
a512srv.lanes[i] = Avx512LaneInfo{} // clear message
a512srv.totalIn--
}
}
}
// Delete entry from hash map
delete(a512srv.digests, uid)
}
// Invoke assembly and send results back
func (a512srv *Avx512Server) blocks() {
inputs := [16][]byte{}
for i := range inputs {
inputs[i] = a512srv.lanes[i].block
}
mask := expandMask(genMask(inputs))
outputs := blockAvx512(a512srv.getDigests(), inputs, mask)
a512srv.totalIn = 0
for i := 0; i < len(outputs); i++ {
uid, outputCh := a512srv.lanes[i].uid, a512srv.lanes[i].outputCh
a512srv.digests[uid] = outputs[i]
a512srv.lanes[i] = Avx512LaneInfo{}
if outputCh != nil {
// Send back result
outputCh <- outputs[i]
delete(a512srv.digests, uid) // Delete entry from hashmap
}
}
}
func (a512srv *Avx512Server) Write(uid uint64, p []byte) (nn int, err error) {
a512srv.blocksCh <- blockInput{uid: uid, msg: p}
return len(p), nil
}
// Sum - return sha256 sum in bytes for a given sum id.
func (a512srv *Avx512Server) Sum(uid uint64, p []byte) [32]byte {
sumCh := make(chan [32]byte)
a512srv.blocksCh <- blockInput{uid: uid, msg: p, final: true, sumCh: sumCh}
return <-sumCh
}
func (a512srv *Avx512Server) getDigests() *[512]byte {
digests := [512]byte{}
for i, lane := range a512srv.lanes {
a, ok := a512srv.digests[lane.uid]
if ok {
binary.BigEndian.PutUint32(digests[(i+0*16)*4:], binary.LittleEndian.Uint32(a[0:4]))
binary.BigEndian.PutUint32(digests[(i+1*16)*4:], binary.LittleEndian.Uint32(a[4:8]))
binary.BigEndian.PutUint32(digests[(i+2*16)*4:], binary.LittleEndian.Uint32(a[8:12]))
binary.BigEndian.PutUint32(digests[(i+3*16)*4:], binary.LittleEndian.Uint32(a[12:16]))
binary.BigEndian.PutUint32(digests[(i+4*16)*4:], binary.LittleEndian.Uint32(a[16:20]))
binary.BigEndian.PutUint32(digests[(i+5*16)*4:], binary.LittleEndian.Uint32(a[20:24]))
binary.BigEndian.PutUint32(digests[(i+6*16)*4:], binary.LittleEndian.Uint32(a[24:28]))
binary.BigEndian.PutUint32(digests[(i+7*16)*4:], binary.LittleEndian.Uint32(a[28:32]))
} else {
binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0)
binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1)
binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2)
binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3)
binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4)
binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5)
binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6)
binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7)
}
}
return &digests
}
// Helper struct for sorting blocks based on length
type lane struct {
len uint
pos uint
}
type lanes []lane
func (lns lanes) Len() int { return len(lns) }
func (lns lanes) Swap(i, j int) { lns[i], lns[j] = lns[j], lns[i] }
func (lns lanes) Less(i, j int) bool { return lns[i].len < lns[j].len }
// Helper struct recording how many 64-byte rounds a given lane mask stays valid for
type maskRounds struct {
mask uint64
rounds uint64
}
func genMask(input [16][]byte) [16]maskRounds {
// Sort blocks by length, small to large
var sorted [16]lane
for c, inpt := range input {
sorted[c] = lane{uint(len(inpt)), uint(c)}
}
sort.Sort(lanes(sorted[:]))
// Create mask array including 'rounds' between masks
m, round, index := uint64(0xffff), uint64(0), 0
var mr [16]maskRounds
for _, s := range sorted {
if s.len > 0 {
if uint64(s.len)>>6 > round {
mr[index] = maskRounds{m, (uint64(s.len) >> 6) - round}
index++
}
round = uint64(s.len) >> 6
}
m = m & ^(1 << uint(s.pos))
}
return mr
}
// TODO: remove function
func expandMask(mr [16]maskRounds) []uint64 {
size := uint64(0)
for _, r := range mr {
size += r.rounds
}
result, index := make([]uint64, size), 0
for _, r := range mr {
for j := uint64(0); j < r.rounds; j++ {
result[index] = r.mask
index++
}
}
return result
}
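As a usage note, the Avx512Server defined above multiplexes up to 16 independent digests onto the 16-lane kernel; each hash obtained from NewAvx512 owns one lane uid and communicates over the server's input channel. A minimal sketch, assuming a CPU with the required AVX-512 extensions (the package's cpuid detection is expected to gate this in practice):
package main

import (
	"encoding/hex"
	"fmt"

	sha256 "github.com/minio/sha256-simd"
)

func main() {
	// One background goroutine (Process) services all lanes of the server.
	srv := sha256.NewAvx512Server()

	// Each digest gets its own lane uid; both satisfy hash.Hash.
	h1 := sha256.NewAvx512(srv)
	h2 := sha256.NewAvx512(srv)

	h1.Write([]byte("lane one"))
	h2.Write([]byte("lane two"))

	fmt.Println(hex.EncodeToString(h1.Sum(nil)))
	fmt.Println(hex.EncodeToString(h2.Sum(nil)))
}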

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff