// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

// +build !noupgrade

package upgrade

import (
	"archive/tar"
	"archive/zip"
	"bytes"
	"compress/gzip"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"time"

	"github.com/syncthing/syncthing/lib/dialer"
	"github.com/syncthing/syncthing/lib/signature"
)
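
// DisabledByCompilation reports whether upgrades were disabled at compile
// time. This file is only built without the "noupgrade" tag, so here it is
// always false.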
const DisabledByCompilation = false

const (
	// Current binary size hovers around 10 MB. We give it some room to grow
	// and say that we never expect the binary to be larger than 64 MB.
	maxBinarySize = 64 << 20 // 64 MiB

	// The max expected size of the signature file.
	maxSignatureSize = 10 << 10 // 10 KiB

	// We set the same limit on the archive. The binary will compress and we
	// include some other stuff - currently the release archive size is
	// around 6 MB.
	maxArchiveSize = maxBinarySize

	// When looking through the archive for the binary and signature, stop
	// looking once we've searched this many files.
	maxArchiveMembers = 100

	// Archive reads, or metadata checks, that take longer than this will be
	// rejected.
	readTimeout = 30 * time.Minute

	// The limit on the size of metadata that we accept.
	maxMetadataSize = 10 << 20 // 10 MiB
)

// This is an HTTP/HTTPS client that does *not* perform certificate
// validation. We do this because some systems where Syncthing runs have
// issues with old or missing CA roots. It doesn't actually matter that we
// load the upgrade insecurely as we verify an ECDSA signature of the actual
// binary contents before accepting the upgrade.
var insecureHTTP = &http.Client{
	Timeout: readTimeout,
	Transport: &http.Transport{
		Dial:  dialer.Dial,
		Proxy: http.ProxyFromEnvironment,
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	},
}
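
// insecureGet performs a GET request using the non-verifying HTTP client
// above, with a User-Agent header identifying the Syncthing version and Go
// runtime.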
func insecureGet(url, version string) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}

	req.Header.Set("User-Agent", fmt.Sprintf(`syncthing %s (%s %s-%s)`, version, runtime.Version(), runtime.GOOS, runtime.GOARCH))
	return insecureHTTP.Do(req)
}

// FetchLatestReleases returns the latest releases. The "current" parameter
// is used for setting the User-Agent only.
func FetchLatestReleases(releasesURL, current string) []Release {
	resp, err := insecureGet(releasesURL, current)
	if err != nil {
		l.Infoln("Couldn't fetch release information:", err)
		return nil
	}
	if resp.StatusCode > 299 {
		l.Infoln("API call returned HTTP error:", resp.Status)
		return nil
	}

	var rels []Release
	err = json.NewDecoder(io.LimitReader(resp.Body, maxMetadataSize)).Decode(&rels)
	if err != nil {
		l.Infoln("Fetching release information:", err)
	}
	resp.Body.Close()

	return rels
}
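
// SortByRelease sorts releases in descending version order, newest first.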
type SortByRelease []Release

func (s SortByRelease) Len() int {
	return len(s)
}
func (s SortByRelease) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s SortByRelease) Less(i, j int) bool {
	return CompareVersions(s[i].Tag, s[j].Tag) > 0
}
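
// LatestRelease fetches the available releases from releasesURL and selects
// the one to upgrade to. The "current" version is used for the User-Agent
// header and for version comparison.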
func LatestRelease(releasesURL, current string, upgradeToPreReleases bool) (Release, error) {
	rels := FetchLatestReleases(releasesURL, current)
	return SelectLatestRelease(rels, current, upgradeToPreReleases)
}
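
// SelectLatestRelease picks the release to upgrade to from rels. Pre-releases
// are skipped unless upgradeToPreReleases is set, only releases with an asset
// matching one of releaseNames(rel.Tag) are considered, and an acceptable
// minor upgrade is preferred over jumping straight to a new major version.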
func SelectLatestRelease(rels []Release, current string, upgradeToPreReleases bool) (Release, error) {
	if len(rels) == 0 {
		return Release{}, ErrNoVersionToSelect
	}

	// Sort the releases, lowest version number first
	sort.Sort(sort.Reverse(SortByRelease(rels)))

	var selected Release
	for _, rel := range rels {
		if CompareVersions(rel.Tag, current) == MajorNewer {
			// We've found a new major version. That's fine, but if we've
			// already found a minor upgrade that is acceptable we should go
			// with that one first and then revisit in the future.
			if selected.Tag != "" && CompareVersions(selected.Tag, current) == Newer {
				return selected, nil
			}
		}

		if rel.Prerelease && !upgradeToPreReleases {
			l.Debugln("skipping pre-release", rel.Tag)
			continue
		}

		expectedReleases := releaseNames(rel.Tag)
	nextAsset:
		for _, asset := range rel.Assets {
			assetName := path.Base(asset.Name)
			// Check for the architecture
			for _, expRel := range expectedReleases {
				if strings.HasPrefix(assetName, expRel) {
					l.Debugln("selected", rel.Tag)
					selected = rel
					break nextAsset
				}
			}
		}
	}

	if selected.Tag == "" {
		return Release{}, ErrNoReleaseDownload
	}

	return selected, nil
}

// Upgrade to the given release, saving the previous binary with a ".old" extension.
func upgradeTo(binary string, rel Release) error {
	expectedReleases := releaseNames(rel.Tag)
	for _, asset := range rel.Assets {
		assetName := path.Base(asset.Name)
		l.Debugln("considering release", assetName)

		for _, expRel := range expectedReleases {
			if strings.HasPrefix(assetName, expRel) {
				return upgradeToURL(assetName, binary, asset.URL)
			}
		}
	}

	return ErrNoReleaseDownload
}

// Upgrade to the given release, saving the previous binary with a ".old" extension.
func upgradeToURL(archiveName, binary string, url string) error {
	fname, err := readRelease(archiveName, filepath.Dir(binary), url)
	if err != nil {
		return err
	}
	defer os.Remove(fname)

	old := binary + ".old"
	os.Remove(old)
	err = os.Rename(binary, old)
	if err != nil {
		return err
	}
	if err := os.Rename(fname, binary); err != nil {
		os.Rename(old, binary)
		return err
	}
	return nil
}
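
// readRelease downloads the release archive at url, extracts the binary it
// contains into a temporary file in dir, verifies its signature, and returns
// the path of that temporary file.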
func readRelease(archiveName, dir, url string) (string, error) {
	l.Debugf("loading %q", url)

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return "", err
	}

	req.Header.Add("Accept", "application/octet-stream")
	resp, err := insecureHTTP.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	switch runtime.GOOS {
	case "windows":
		return readZip(archiveName, dir, io.LimitReader(resp.Body, maxArchiveSize))
	default:
		return readTarGz(archiveName, dir, io.LimitReader(resp.Body, maxArchiveSize))
	}
}
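
// readTarGz extracts the upgrade binary and the release signature from a
// gzipped tar archive, verifies the signature, and returns the path of the
// temporary file holding the binary.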
func readTarGz(archiveName, dir string, r io.Reader) (string, error) {
	gr, err := gzip.NewReader(r)
	if err != nil {
		return "", err
	}

	tr := tar.NewReader(gr)

	var tempName string
	var sig []byte

	// Iterate through the files in the archive.
	i := 0
	for {
		if i >= maxArchiveMembers {
			break
		}
		i++

		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return "", err
		}
		if hdr.Size > maxBinarySize {
			// We don't even want to try processing or skipping over files
			// that are too large.
			break
		}

		err = archiveFileVisitor(dir, &tempName, &sig, hdr.Name, tr)
		if err != nil {
			return "", err
		}

		if tempName != "" && sig != nil {
			break
		}
	}

	if err := verifyUpgrade(archiveName, tempName, sig); err != nil {
		return "", err
	}

	return tempName, nil
}
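
// readZip does the same as readTarGz for zip archives, which is the format
// used for Windows releases.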
func readZip(archiveName, dir string, r io.Reader) (string, error) {
	body, err := ioutil.ReadAll(r)
	if err != nil {
		return "", err
	}

	archive, err := zip.NewReader(bytes.NewReader(body), int64(len(body)))
	if err != nil {
		return "", err
	}

	var tempName string
	var sig []byte

	// Iterate through the files in the archive.
	i := 0
	for _, file := range archive.File {
		if i >= maxArchiveMembers {
			break
		}
		i++

		if file.UncompressedSize64 > maxBinarySize {
			// We don't even want to try processing or skipping over files
			// that are too large.
			break
		}

		inFile, err := file.Open()
		if err != nil {
			return "", err
		}

		err = archiveFileVisitor(dir, &tempName, &sig, file.Name, inFile)
		inFile.Close()
		if err != nil {
			return "", err
		}

		if tempName != "" && sig != nil {
			break
		}
	}

	if err := verifyUpgrade(archiveName, tempName, sig); err != nil {
		return "", err
	}

	return tempName, nil
}

// archiveFileVisitor is called for each file in an archive. It may set
// tempFile and signature.
func archiveFileVisitor(dir string, tempFile *string, signature *[]byte, archivePath string, filedata io.Reader) error {
	var err error
	filename := path.Base(archivePath)
	archiveDir := path.Dir(archivePath)
	l.Debugf("considering file %s", archivePath)
	switch filename {
	case "syncthing", "syncthing.exe":
		archiveDirs := strings.Split(archiveDir, "/")
		if len(archiveDirs) > 1 {
			// Don't consider "syncthing" files found too deeply, as they may be
			// other things.
			return nil
		}
		l.Debugf("found upgrade binary %s", archivePath)
		*tempFile, err = writeBinary(dir, io.LimitReader(filedata, maxBinarySize))
		if err != nil {
			return err
		}

	case "release.sig":
		l.Debugf("found signature %s", archivePath)
		*signature, err = ioutil.ReadAll(io.LimitReader(filedata, maxSignatureSize))
		if err != nil {
			return err
		}
	}

	return nil
}
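
// verifyUpgrade checks the release signature against the archive name and the
// contents of the extracted binary in tempName, removing the temporary file
// if the check fails.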
func verifyUpgrade(archiveName, tempName string, sig []byte) error {
	if tempName == "" {
		return fmt.Errorf("no upgrade found")
	}
	if sig == nil {
		return fmt.Errorf("no signature found")
	}

	l.Debugf("checking signature\n%s", sig)

	fd, err := os.Open(tempName)
	if err != nil {
		return err
	}

	// Create a new reader that will serve reads from, in order:
	//
	// - the archive name ("syncthing-linux-amd64-v0.13.0-beta.4.tar.gz")
	//   followed by a newline
	//
	// - the temp file contents
	//
	// We then verify the release signature against the contents of this
	// multireader. This ensures that it is not only a bona fide syncthing
	// binary, but it is also of exactly the platform and version we expect.

	mr := io.MultiReader(bytes.NewBufferString(archiveName+"\n"), fd)
	err = signature.Verify(SigningKey, sig, mr)
	fd.Close()

	if err != nil {
		os.Remove(tempName)
		return err
	}

	return nil
}
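
// writeBinary copies inFile to a temporary file in dir, makes it executable,
// and returns its name.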
func writeBinary(dir string, inFile io.Reader) (filename string, err error) {
	// Write the binary to a temporary file.

	outFile, err := ioutil.TempFile(dir, "syncthing")
	if err != nil {
		return "", err
	}

	_, err = io.Copy(outFile, inFile)
	if err != nil {
		os.Remove(outFile.Name())
		return "", err
	}

	err = outFile.Close()
	if err != nil {
		os.Remove(outFile.Name())
		return "", err
	}

	err = os.Chmod(outFile.Name(), os.FileMode(0755))
	if err != nil {
		os.Remove(outFile.Name())
		return "", err
	}

	return outFile.Name(), nil
}