mirror of
https://github.com/octoleo/restic.git
synced 2024-11-22 12:55:18 +00:00
Vendor dependencies with go mod vendor
This commit is contained in:
parent
9c6e0c6eb9
commit
e0f68ec2c0
484
Gopkg.lock
generated
484
Gopkg.lock
generated
@ -1,484 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:94e9caf404409a2990cfd22aca37d758494c098eff3e2c37fda1abed862e74dd"
|
||||
name = "bazil.org/fuse"
|
||||
packages = [
|
||||
".",
|
||||
"fs",
|
||||
"fuseutil",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57"
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata"]
|
||||
pruneopts = "UT"
|
||||
revision = "aad3f485ee528456e0768f20397b4d9dd941e755"
|
||||
version = "v0.25.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:46ea9487304f4b3c787f54483ecb13a338d686dcd670db0ab1a112ed0ae2128e"
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
packages = [
|
||||
"storage",
|
||||
"version",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "4e8cbbfb1aeab140cd0fa97fd16b64ee18c3ca6a"
|
||||
version = "v19.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:27d0cd1a78fc836f7c0f07749d029a5f7895c84ad066187b08b70e9d1830098e"
|
||||
name = "github.com/Azure/go-autorest"
|
||||
packages = [
|
||||
"autorest",
|
||||
"autorest/adal",
|
||||
"autorest/azure",
|
||||
"autorest/date",
|
||||
"logger",
|
||||
"version",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "dd94e014aaf16d1df746762e392aa201c1b4c461"
|
||||
version = "v10.15.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2209584c0f7c9b68c23374e659357ab546e1b70eec2761f03280f69a8fd23d77"
|
||||
name = "github.com/cenkalti/backoff"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e"
|
||||
version = "v2.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7cb4fdca4c251b3ef8027c90ea35f70c7b661a593b9eeae34753c65499098bb1"
|
||||
name = "github.com/cpuguy83/go-md2man"
|
||||
packages = ["md2man"]
|
||||
pruneopts = "UT"
|
||||
revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
|
||||
version = "v1.0.8"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55"
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
||||
version = "v3.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:6f9339c912bbdda81302633ad7e99a28dfa5a639c864061f1929510a9a64aa74"
|
||||
name = "github.com/dustin/go-humanize"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c7edfbb6320d6a93240d663dc52bca92bed4c116abe54c35679eec4e7cc2bd77"
|
||||
name = "github.com/elithrar/simple-scrypt"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "d150773194090feb6c897805a7bcea8d49544e2c"
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:fe8a03a8222d5b913f256972933d26d24ad7c8286692a42943bc01633cc8fce3"
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "358ee7663966325963d4e8b2e1fbd570c5195153"
|
||||
version = "v1.38.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:15042ad3498153684d09f393bbaec6b216c8eec6d61f63dff711de7d64ed8861"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
pruneopts = "UT"
|
||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0"
|
||||
name = "github.com/google/go-cmp"
|
||||
packages = [
|
||||
"cmp",
|
||||
"cmp/cmpopts",
|
||||
"cmp/internal/diff",
|
||||
"cmp/internal/function",
|
||||
"cmp/internal/value",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
|
||||
name = "github.com/inconshreveable/mousetrap"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
version = "v1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:190ff84d9b2ed6589088f178cba8edb4b8ecb334df4572421fb016be1ac20463"
|
||||
name = "github.com/juju/ratelimit"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "59fac5042749a5afb9af70e813da1dd5474f0167"
|
||||
version = "1.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9cedee824c21326bd26950bd9e1ffe9dc4e7ca03dc8634d0e6f954ee6a383172"
|
||||
name = "github.com/kr/fs"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:1faa76bd9bffce9c25eaca0597afb67bd05a21ae57fe4378154ce8855ef163d1"
|
||||
name = "github.com/kurin/blazer"
|
||||
packages = [
|
||||
"b2",
|
||||
"base",
|
||||
"internal/b2assets",
|
||||
"internal/b2types",
|
||||
"internal/blog",
|
||||
"x/window",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "caf65aa76491dc533bac68ad3243ce72fa4e0a0a"
|
||||
version = "v0.5.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:4e878df5f4e9fd625bf9c9aac77ef7cbfa4a74c01265505527c23470c0e40300"
|
||||
name = "github.com/marstr/guid"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d4d17353dbd05cb52a2a52b7fe1771883b682806f68db442b436294926bbfafb"
|
||||
name = "github.com/mattn/go-isatty"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
|
||||
version = "v0.0.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:95c73c666919be2843b955eafc83f58c136312b74f79c703152f4c4a95fd64dc"
|
||||
name = "github.com/minio/minio-go"
|
||||
packages = [
|
||||
".",
|
||||
"pkg/credentials",
|
||||
"pkg/encrypt",
|
||||
"pkg/s3signer",
|
||||
"pkg/s3utils",
|
||||
"pkg/set",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "70799fe8dae6ecfb6c7d7e9e048fce27f23a1992"
|
||||
version = "v6.0.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:8eb17c2ec4df79193ae65b621cd1c0c4697db3bc317fe6afdc76d7f2746abd05"
|
||||
name = "github.com/mitchellh/go-homedir"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:928de5172dd3563964d1b88a4ee3775cf72e16f1efabb482ab6d0e0bab86ee69"
|
||||
name = "github.com/ncw/swift"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
|
||||
version = "v1.0.39"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:cfa0d7741863a0e1d30e0ccdd4b48a96a471cdb47892303de8b92c3713af3e77"
|
||||
name = "github.com/pkg/profile"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "5b67d428864e92711fcbd2f8629456121a56d91f"
|
||||
version = "v1.2.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:23ed92ba5d90a2dfe817f3895027ccef796e79c30be5125d48e17afdcc395d73"
|
||||
name = "github.com/pkg/sftp"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "57673e38ea946592a59c26592b7e6fbda646975b"
|
||||
version = "v1.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0d67664e93e366f072ac9672feea29bfc63c9f90f005e9e8a0df1954153f5a14"
|
||||
name = "github.com/pkg/xattr"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "ae385d07bb53f092fcc7daaf738d8513df084931"
|
||||
version = "v0.3.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:13ecc4000f49cf0aa3ee56fffcc93119c8edffacfff08674c80d2757d8c33a83"
|
||||
name = "github.com/restic/chunker"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "db83917be3b88cc307464b7d8a221c173e34a0db"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8bc629776d035c003c7814d4369521afe67fdb8efc4b5f66540d29343b98cf23"
|
||||
name = "github.com/russross/blackfriday"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
|
||||
version = "v1.5.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:274f67cb6fed9588ea2521ecdac05a6d62a8c51c074c1fccc6a49a40ba80e925"
|
||||
name = "github.com/satori/go.uuid"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d867dfa6751c8d7a435821ad3b736310c2ed68945d05b50fb9d23aee0540c8cc"
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "3e01752db0189b9157070a0e1668a620f9a85da2"
|
||||
version = "v1.0.6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:e01b05ba901239c783dfe56450bcde607fc858908529868259c9a8765dc176d0"
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = [
|
||||
".",
|
||||
"doc",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
|
||||
version = "v0.0.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:6cd85da60627124f96cf2d1ced7ca729f7391bb4616e26f702b8c0b1a56b7a30"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = [
|
||||
"argon2",
|
||||
"blake2b",
|
||||
"cast5",
|
||||
"curve25519",
|
||||
"ed25519",
|
||||
"ed25519/internal/edwards25519",
|
||||
"internal/chacha20",
|
||||
"internal/subtle",
|
||||
"openpgp",
|
||||
"openpgp/armor",
|
||||
"openpgp/elgamal",
|
||||
"openpgp/errors",
|
||||
"openpgp/packet",
|
||||
"openpgp/s2k",
|
||||
"pbkdf2",
|
||||
"poly1305",
|
||||
"scrypt",
|
||||
"ssh",
|
||||
"ssh/terminal",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "c126467f60eb25f8f27e5a981f32a87e3965053f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:8356aa7bdcb10a210b814b64ff76d61de7c36ac4cb6263de3af5e3e2e546956d"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"context/ctxhttp",
|
||||
"http/httpguts",
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "32f9bdbd7df18e8641d215e7ea68be88b971feb0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:bea0314c10bd362ab623af4880d853b5bad3b63d0ab9945c47e461b8d04203ed"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [
|
||||
".",
|
||||
"google",
|
||||
"internal",
|
||||
"jws",
|
||||
"jwt",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "3d292e4d0cdc3a0113e6d207bb137145ef1de42f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:39ebcc2b11457b703ae9ee2e8cca0f68df21969c6102cb3b705f76cca0ea0239"
|
||||
name = "golang.org/x/sync"
|
||||
packages = ["errgroup"]
|
||||
pruneopts = "UT"
|
||||
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:a220a85c72a6cb7339c412cb2b117019a7fd94007cdfffb3b5b1d058227a2bf8"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"cpu",
|
||||
"unix",
|
||||
"windows",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "bd9dbc187b6e1dacfdd2722a87e83093c2d7bd6e"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:e5a8511f063c38c51ab9ab80e718e9149f692652aeb4e393a8c020dd1bf38ca2"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
"collate/build",
|
||||
"encoding",
|
||||
"encoding/internal",
|
||||
"encoding/internal/identifier",
|
||||
"encoding/unicode",
|
||||
"internal/colltab",
|
||||
"internal/gen",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"internal/utf8internal",
|
||||
"language",
|
||||
"runes",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:fb983acae7bd9c3ed9aadc1b1241d9e559ed21dbf84c17a0dda663ca169ccd69"
|
||||
name = "google.golang.org/api"
|
||||
packages = [
|
||||
"gensupport",
|
||||
"googleapi",
|
||||
"googleapi/internal/uritemplates",
|
||||
"storage/v1",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "31ca0e01cd791f07750cb23fc99327721f753290"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c8907869850adaa8bd7631887948d0684f3787d0912f1c01ab72581a6c34432e"
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [
|
||||
".",
|
||||
"internal",
|
||||
"internal/app_identity",
|
||||
"internal/base",
|
||||
"internal/datastore",
|
||||
"internal/log",
|
||||
"internal/modules",
|
||||
"internal/remote_api",
|
||||
"internal/urlfetch",
|
||||
"urlfetch",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
digest = "1:5bb148b78468350091db2ffbb2370f35cc6dcd74d9378a31b1c7b86ff7528f08"
|
||||
name = "gopkg.in/tomb.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "d5d1b5820637886def9eef33e03a27a9f166942c"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
||||
version = "v2.2.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"bazil.org/fuse",
|
||||
"bazil.org/fuse/fs",
|
||||
"github.com/Azure/azure-sdk-for-go/storage",
|
||||
"github.com/cenkalti/backoff",
|
||||
"github.com/elithrar/simple-scrypt",
|
||||
"github.com/google/go-cmp/cmp",
|
||||
"github.com/google/go-cmp/cmp/cmpopts",
|
||||
"github.com/juju/ratelimit",
|
||||
"github.com/kurin/blazer/b2",
|
||||
"github.com/mattn/go-isatty",
|
||||
"github.com/minio/minio-go",
|
||||
"github.com/minio/minio-go/pkg/credentials",
|
||||
"github.com/ncw/swift",
|
||||
"github.com/pkg/errors",
|
||||
"github.com/pkg/profile",
|
||||
"github.com/pkg/sftp",
|
||||
"github.com/pkg/xattr",
|
||||
"github.com/restic/chunker",
|
||||
"github.com/spf13/cobra",
|
||||
"github.com/spf13/cobra/doc",
|
||||
"github.com/spf13/pflag",
|
||||
"golang.org/x/crypto/openpgp",
|
||||
"golang.org/x/crypto/poly1305",
|
||||
"golang.org/x/crypto/scrypt",
|
||||
"golang.org/x/crypto/ssh/terminal",
|
||||
"golang.org/x/net/context",
|
||||
"golang.org/x/net/context/ctxhttp",
|
||||
"golang.org/x/net/http2",
|
||||
"golang.org/x/oauth2/google",
|
||||
"golang.org/x/sync/errgroup",
|
||||
"golang.org/x/sys/unix",
|
||||
"golang.org/x/text/encoding/unicode",
|
||||
"google.golang.org/api/googleapi",
|
||||
"google.golang.org/api/storage/v1",
|
||||
"gopkg.in/tomb.v2",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
25
Gopkg.toml
25
Gopkg.toml
@ -1,25 +0,0 @@
|
||||
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
[prune]
|
||||
unused-packages = true
|
||||
go-tests = true
|
52
go.mod
Normal file
52
go.mod
Normal file
@ -0,0 +1,52 @@
|
||||
module github.com/restic/restic
|
||||
|
||||
require (
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
|
||||
cloud.google.com/go v0.25.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v19.1.0+incompatible
|
||||
github.com/Azure/go-autorest v10.15.0+incompatible // indirect
|
||||
github.com/cenkalti/backoff v2.0.0+incompatible
|
||||
github.com/cpuguy83/go-md2man v1.0.8 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
|
||||
github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2 // indirect
|
||||
github.com/elithrar/simple-scrypt v1.3.0
|
||||
github.com/go-ini/ini v1.38.1 // indirect
|
||||
github.com/golang/protobuf v1.1.0 // indirect
|
||||
github.com/google/go-cmp v0.2.0
|
||||
github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jtolds/gls v4.2.1+incompatible // indirect
|
||||
github.com/juju/ratelimit v1.0.1
|
||||
github.com/kr/fs v0.1.0 // indirect
|
||||
github.com/kurin/blazer v0.5.1
|
||||
github.com/marstr/guid v1.1.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.3
|
||||
github.com/minio/minio-go v6.0.5+incompatible
|
||||
github.com/mitchellh/go-homedir v0.0.0-20180523094522-3864e76763d9 // indirect
|
||||
github.com/ncw/swift v1.0.39
|
||||
github.com/pkg/errors v0.8.0
|
||||
github.com/pkg/profile v1.2.1
|
||||
github.com/pkg/sftp v1.8.0
|
||||
github.com/pkg/xattr v0.3.1
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/restic/chunker v0.2.0
|
||||
github.com/russross/blackfriday v1.5.1 // indirect
|
||||
github.com/satori/go.uuid v1.2.0 // indirect
|
||||
github.com/smartystreets/assertions v0.0.0-20180803164922-886ec427f6b9 // indirect
|
||||
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect
|
||||
github.com/spf13/cobra v0.0.3
|
||||
github.com/spf13/pflag v1.0.1 // indirect
|
||||
github.com/stretchr/testify v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb
|
||||
golang.org/x/net v0.0.0-20180801174033-32f9bdbd7df1
|
||||
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
|
||||
golang.org/x/sys v0.0.0-20180727230415-bd9dbc187b6e
|
||||
golang.org/x/text v0.3.0
|
||||
google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79
|
||||
google.golang.org/appengine v1.1.0 // indirect
|
||||
gopkg.in/ini.v1 v1.38.1 // indirect
|
||||
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637
|
||||
gopkg.in/yaml.v2 v2.2.1 // indirect
|
||||
)
|
98
go.sum
Normal file
98
go.sum
Normal file
@ -0,0 +1,98 @@
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
cloud.google.com/go v0.25.0 h1:6vD6xZTc8Jo6To8gHxFDRVsMvWFDgY3rugNszcDalN8=
|
||||
cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/Azure/azure-sdk-for-go v19.1.0+incompatible h1:ysqLW+tqZjJWOTE74heH/pDRbr4vlN3yV+dqQYgpyxw=
|
||||
github.com/Azure/azure-sdk-for-go v19.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-autorest v10.15.0+incompatible h1:GqDO/9r+7tmkU8HI/DNLVkeucncU8jCul1DLeTaA3GI=
|
||||
github.com/Azure/go-autorest v10.15.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY=
|
||||
github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
|
||||
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2 h1:G9/PqfhOrt8JXnw0DGTfVoOkKHDhOlEZqhE/cu+NvQM=
|
||||
github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/elithrar/simple-scrypt v1.3.0 h1:KIlOlxdoQf9JWKl5lMAJ28SY2URB0XTRDn2TckyzAZg=
|
||||
github.com/elithrar/simple-scrypt v1.3.0/go.mod h1:U2XQRI95XHY0St410VE3UjT7vuKb1qPwrl/EJwEqnZo=
|
||||
github.com/go-ini/ini v1.38.1 h1:hbtfM8emWUVo9GnXSloXYyFbXxZ+tG6sbepSStoe1FY=
|
||||
github.com/go-ini/ini v1.38.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
|
||||
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f h1:FDM3EtwZLyhW48YRiyqjivNlNZjAObv4xt4NnJaU+NQ=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=
|
||||
github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
|
||||
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kurin/blazer v0.5.1 h1:mBc4i1uhHJEqU0KvzOgpMHhkwf+EcXvxjWEUS7HG+eY=
|
||||
github.com/kurin/blazer v0.5.1/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
|
||||
github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
|
||||
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
|
||||
github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/minio/minio-go v6.0.5+incompatible h1:qxQQW40lV2vuE9i6yYmt90GSJlT1YrMenWrjM6nZh0Q=
|
||||
github.com/minio/minio-go v6.0.5+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8=
|
||||
github.com/mitchellh/go-homedir v0.0.0-20180523094522-3864e76763d9 h1:Y94YB7jrsihrbGSqRNMwRWJ2/dCxr0hdC2oPRohkx0A=
|
||||
github.com/mitchellh/go-homedir v0.0.0-20180523094522-3864e76763d9/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/ncw/swift v1.0.39 h1:kKWP/n50ohzUiB5m8/Inh0Pi5ftslc1UIyFSmTstSrM=
|
||||
github.com/ncw/swift v1.0.39/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pkg/sftp v1.8.0 h1:SJ2EX5aeifvl4zzbci3urQbr5p7Xc/A7/Ia9T8ahhNM=
|
||||
github.com/pkg/sftp v1.8.0/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
|
||||
github.com/pkg/xattr v0.3.1 h1:6ceg5jxT3cH4lM5n8S2PmiNeOv61MK08yvvYJwyrPH0=
|
||||
github.com/pkg/xattr v0.3.1/go.mod h1:CBdxFOf0VLbaj6HKuP2ITOVV7NY6ycPKgIgnSx2ZNVs=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/restic/chunker v0.2.0 h1:GjvmvFuv2mx0iekZs+iAlrioo2UtgsGSSplvoXaVHDU=
|
||||
github.com/restic/chunker v0.2.0/go.mod h1:VdjruEj+7BU1ZZTW8Qqi1exxRx2Omf2JH0NsUEkQ29s=
|
||||
github.com/russross/blackfriday v1.5.1/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180803164922-886ec427f6b9 h1:lXQ+j+KwZcbwrbgU0Rp4Eglg3EJLHbuZU3BbOqAGBmg=
|
||||
github.com/smartystreets/assertions v0.0.0-20180803164922-886ec427f6b9/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a h1:JSvGDIbmil4Ui/dDdFBExb7/cmkNjyX5F97oglmvCDo=
|
||||
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb h1:Ah9YqXLj6fEgeKqcmBuLCbAsrF3ScD7dJ/bYM0C6tXI=
|
||||
golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/net v0.0.0-20180801174033-32f9bdbd7df1 h1:YD10LevZEjN9f7mDrO+wRM0jig11AZCJ5Dg8/k+hDm0=
|
||||
golang.org/x/net v0.0.0-20180801174033-32f9bdbd7df1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc h1:3ElrZeO6IBP+M8kgu5YFwRo92Gqr+zBg3aooYQ6ziqU=
|
||||
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180525142821-c11f84a56e43/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180727230415-bd9dbc187b6e h1:3dQ4fR8k5KugjVKO0oqSd1odxuk2yaE2CIfxWP2WarQ=
|
||||
golang.org/x/sys v0.0.0-20180727230415-bd9dbc187b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79 h1:wCy2/9bhO1JeP2zZUALrj7ZdZuZoR4mRV57kTxjqRpo=
|
||||
google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.38.1 h1:8E3nEICVJ6kxl6aTXYp77xYyObhw7YG9/avdj0r3vME=
|
||||
gopkg.in/ini.v1 v1.38.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs=
|
||||
gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
21
vendor/github.com/dustin/go-humanize/.travis.yml
generated
vendored
21
vendor/github.com/dustin/go-humanize/.travis.yml
generated
vendored
@ -1,21 +0,0 @@
|
||||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.3.x
|
||||
- 1.5.x
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- master
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: master
|
||||
fast_finish: true
|
||||
install:
|
||||
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
||||
script:
|
||||
- go get -t -v ./...
|
||||
- diff -u <(echo -n) <(gofmt -d -s .)
|
||||
- go tool vet .
|
||||
- go test -v -race ./...
|
21
vendor/github.com/dustin/go-humanize/LICENSE
generated
vendored
21
vendor/github.com/dustin/go-humanize/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
||||
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
<http://www.opensource.org/licenses/mit-license.php>
|
124
vendor/github.com/dustin/go-humanize/README.markdown
generated
vendored
124
vendor/github.com/dustin/go-humanize/README.markdown
generated
vendored
@ -1,124 +0,0 @@
|
||||
# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
|
||||
|
||||
Just a few functions for helping humanize times and sizes.
|
||||
|
||||
`go get` it as `github.com/dustin/go-humanize`, import it as
|
||||
`"github.com/dustin/go-humanize"`, use it as `humanize`.
|
||||
|
||||
See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
|
||||
complete documentation.
|
||||
|
||||
## Sizes
|
||||
|
||||
This lets you take numbers like `82854982` and convert them to useful
|
||||
strings like, `83 MB` or `79 MiB` (whichever you prefer).
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
|
||||
```
|
||||
|
||||
## Times
|
||||
|
||||
This lets you take a `time.Time` and spit it out in relative terms.
|
||||
For example, `12 seconds ago` or `3 days from now`.
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
|
||||
```
|
||||
|
||||
Thanks to Kyle Lemons for the time implementation from an IRC
|
||||
conversation one day. It's pretty neat.
|
||||
|
||||
## Ordinals
|
||||
|
||||
From a [mailing list discussion][odisc] where a user wanted to be able
|
||||
to label ordinals.
|
||||
|
||||
0 -> 0th
|
||||
1 -> 1st
|
||||
2 -> 2nd
|
||||
3 -> 3rd
|
||||
4 -> 4th
|
||||
[...]
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
|
||||
```
|
||||
|
||||
## Commas
|
||||
|
||||
Want to shove commas into numbers? Be my guest.
|
||||
|
||||
0 -> 0
|
||||
100 -> 100
|
||||
1000 -> 1,000
|
||||
1000000000 -> 1,000,000,000
|
||||
-100000 -> -100,000
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
|
||||
```
|
||||
|
||||
## Ftoa
|
||||
|
||||
Nicer float64 formatter that removes trailing zeros.
|
||||
|
||||
```go
|
||||
fmt.Printf("%f", 2.24) // 2.240000
|
||||
fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
|
||||
fmt.Printf("%f", 2.0) // 2.000000
|
||||
fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
|
||||
```
|
||||
|
||||
## SI notation
|
||||
|
||||
Format numbers with [SI notation][sinotation].
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
humanize.SI(0.00000000223, "M") // 2.23 nM
|
||||
```
|
||||
|
||||
## English-specific functions
|
||||
|
||||
The following functions are in the `humanize/english` subpackage.
|
||||
|
||||
### Plurals
|
||||
|
||||
Simple English pluralization
|
||||
|
||||
```go
|
||||
english.PluralWord(1, "object", "") // object
|
||||
english.PluralWord(42, "object", "") // objects
|
||||
english.PluralWord(2, "bus", "") // buses
|
||||
english.PluralWord(99, "locus", "loci") // loci
|
||||
|
||||
english.Plural(1, "object", "") // 1 object
|
||||
english.Plural(42, "object", "") // 42 objects
|
||||
english.Plural(2, "bus", "") // 2 buses
|
||||
english.Plural(99, "locus", "loci") // 99 loci
|
||||
```
|
||||
|
||||
### Word series
|
||||
|
||||
Format comma-separated words lists with conjuctions:
|
||||
|
||||
```go
|
||||
english.WordSeries([]string{"foo"}, "and") // foo
|
||||
english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
|
||||
english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
|
||||
|
||||
english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
|
||||
```
|
||||
|
||||
[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
|
||||
[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
|
31
vendor/github.com/dustin/go-humanize/big.go
generated
vendored
31
vendor/github.com/dustin/go-humanize/big.go
generated
vendored
@ -1,31 +0,0 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// order of magnitude (to a max order)
|
||||
func oomm(n, b *big.Int, maxmag int) (float64, int) {
|
||||
mag := 0
|
||||
m := &big.Int{}
|
||||
for n.Cmp(b) >= 0 {
|
||||
n.DivMod(n, b, m)
|
||||
mag++
|
||||
if mag == maxmag && maxmag >= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
||||
}
|
||||
|
||||
// total order of magnitude
|
||||
// (same as above, but with no upper limit)
|
||||
func oom(n, b *big.Int) (float64, int) {
|
||||
mag := 0
|
||||
m := &big.Int{}
|
||||
for n.Cmp(b) >= 0 {
|
||||
n.DivMod(n, b, m)
|
||||
mag++
|
||||
}
|
||||
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
||||
}
|
173
vendor/github.com/dustin/go-humanize/bigbytes.go
generated
vendored
173
vendor/github.com/dustin/go-humanize/bigbytes.go
generated
vendored
@ -1,173 +0,0 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var (
|
||||
bigIECExp = big.NewInt(1024)
|
||||
|
||||
// BigByte is one byte in bit.Ints
|
||||
BigByte = big.NewInt(1)
|
||||
// BigKiByte is 1,024 bytes in bit.Ints
|
||||
BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
|
||||
// BigMiByte is 1,024 k bytes in bit.Ints
|
||||
BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
|
||||
// BigGiByte is 1,024 m bytes in bit.Ints
|
||||
BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
|
||||
// BigTiByte is 1,024 g bytes in bit.Ints
|
||||
BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
|
||||
// BigPiByte is 1,024 t bytes in bit.Ints
|
||||
BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
|
||||
// BigEiByte is 1,024 p bytes in bit.Ints
|
||||
BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
|
||||
// BigZiByte is 1,024 e bytes in bit.Ints
|
||||
BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
|
||||
// BigYiByte is 1,024 z bytes in bit.Ints
|
||||
BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
|
||||
)
|
||||
|
||||
var (
|
||||
bigSIExp = big.NewInt(1000)
|
||||
|
||||
// BigSIByte is one SI byte in big.Ints
|
||||
BigSIByte = big.NewInt(1)
|
||||
// BigKByte is 1,000 SI bytes in big.Ints
|
||||
BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
|
||||
// BigMByte is 1,000 SI k bytes in big.Ints
|
||||
BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
|
||||
// BigGByte is 1,000 SI m bytes in big.Ints
|
||||
BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
|
||||
// BigTByte is 1,000 SI g bytes in big.Ints
|
||||
BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
|
||||
// BigPByte is 1,000 SI t bytes in big.Ints
|
||||
BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
|
||||
// BigEByte is 1,000 SI p bytes in big.Ints
|
||||
BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
|
||||
// BigZByte is 1,000 SI e bytes in big.Ints
|
||||
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
|
||||
// BigYByte is 1,000 SI z bytes in big.Ints
|
||||
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
|
||||
)
|
||||
|
||||
var bigBytesSizeTable = map[string]*big.Int{
|
||||
"b": BigByte,
|
||||
"kib": BigKiByte,
|
||||
"kb": BigKByte,
|
||||
"mib": BigMiByte,
|
||||
"mb": BigMByte,
|
||||
"gib": BigGiByte,
|
||||
"gb": BigGByte,
|
||||
"tib": BigTiByte,
|
||||
"tb": BigTByte,
|
||||
"pib": BigPiByte,
|
||||
"pb": BigPByte,
|
||||
"eib": BigEiByte,
|
||||
"eb": BigEByte,
|
||||
"zib": BigZiByte,
|
||||
"zb": BigZByte,
|
||||
"yib": BigYiByte,
|
||||
"yb": BigYByte,
|
||||
// Without suffix
|
||||
"": BigByte,
|
||||
"ki": BigKiByte,
|
||||
"k": BigKByte,
|
||||
"mi": BigMiByte,
|
||||
"m": BigMByte,
|
||||
"gi": BigGiByte,
|
||||
"g": BigGByte,
|
||||
"ti": BigTiByte,
|
||||
"t": BigTByte,
|
||||
"pi": BigPiByte,
|
||||
"p": BigPByte,
|
||||
"ei": BigEiByte,
|
||||
"e": BigEByte,
|
||||
"z": BigZByte,
|
||||
"zi": BigZiByte,
|
||||
"y": BigYByte,
|
||||
"yi": BigYiByte,
|
||||
}
|
||||
|
||||
var ten = big.NewInt(10)
|
||||
|
||||
func humanateBigBytes(s, base *big.Int, sizes []string) string {
|
||||
if s.Cmp(ten) < 0 {
|
||||
return fmt.Sprintf("%d B", s)
|
||||
}
|
||||
c := (&big.Int{}).Set(s)
|
||||
val, mag := oomm(c, base, len(sizes)-1)
|
||||
suffix := sizes[mag]
|
||||
f := "%.0f %s"
|
||||
if val < 10 {
|
||||
f = "%.1f %s"
|
||||
}
|
||||
|
||||
return fmt.Sprintf(f, val, suffix)
|
||||
|
||||
}
|
||||
|
||||
// BigBytes produces a human readable representation of an SI size.
|
||||
//
|
||||
// See also: ParseBigBytes.
|
||||
//
|
||||
// BigBytes(82854982) -> 83 MB
|
||||
func BigBytes(s *big.Int) string {
|
||||
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
|
||||
return humanateBigBytes(s, bigSIExp, sizes)
|
||||
}
|
||||
|
||||
// BigIBytes produces a human readable representation of an IEC size.
|
||||
//
|
||||
// See also: ParseBigBytes.
|
||||
//
|
||||
// BigIBytes(82854982) -> 79 MiB
|
||||
func BigIBytes(s *big.Int) string {
|
||||
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
|
||||
return humanateBigBytes(s, bigIECExp, sizes)
|
||||
}
|
||||
|
||||
// ParseBigBytes parses a string representation of bytes into the number
|
||||
// of bytes it represents.
|
||||
//
|
||||
// See also: BigBytes, BigIBytes.
|
||||
//
|
||||
// ParseBigBytes("42 MB") -> 42000000, nil
|
||||
// ParseBigBytes("42 mib") -> 44040192, nil
|
||||
func ParseBigBytes(s string) (*big.Int, error) {
|
||||
lastDigit := 0
|
||||
hasComma := false
|
||||
for _, r := range s {
|
||||
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
||||
break
|
||||
}
|
||||
if r == ',' {
|
||||
hasComma = true
|
||||
}
|
||||
lastDigit++
|
||||
}
|
||||
|
||||
num := s[:lastDigit]
|
||||
if hasComma {
|
||||
num = strings.Replace(num, ",", "", -1)
|
||||
}
|
||||
|
||||
val := &big.Rat{}
|
||||
_, err := fmt.Sscanf(num, "%f", val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
||||
if m, ok := bigBytesSizeTable[extra]; ok {
|
||||
mv := (&big.Rat{}).SetInt(m)
|
||||
val.Mul(val, mv)
|
||||
rv := &big.Int{}
|
||||
rv.Div(val.Num(), val.Denom())
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unhandled size name: %v", extra)
|
||||
}
|
143
vendor/github.com/dustin/go-humanize/bytes.go
generated
vendored
143
vendor/github.com/dustin/go-humanize/bytes.go
generated
vendored
@ -1,143 +0,0 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// IEC Sizes.
|
||||
// kibis of bits
|
||||
const (
|
||||
Byte = 1 << (iota * 10)
|
||||
KiByte
|
||||
MiByte
|
||||
GiByte
|
||||
TiByte
|
||||
PiByte
|
||||
EiByte
|
||||
)
|
||||
|
||||
// SI Sizes.
|
||||
const (
|
||||
IByte = 1
|
||||
KByte = IByte * 1000
|
||||
MByte = KByte * 1000
|
||||
GByte = MByte * 1000
|
||||
TByte = GByte * 1000
|
||||
PByte = TByte * 1000
|
||||
EByte = PByte * 1000
|
||||
)
|
||||
|
||||
var bytesSizeTable = map[string]uint64{
|
||||
"b": Byte,
|
||||
"kib": KiByte,
|
||||
"kb": KByte,
|
||||
"mib": MiByte,
|
||||
"mb": MByte,
|
||||
"gib": GiByte,
|
||||
"gb": GByte,
|
||||
"tib": TiByte,
|
||||
"tb": TByte,
|
||||
"pib": PiByte,
|
||||
"pb": PByte,
|
||||
"eib": EiByte,
|
||||
"eb": EByte,
|
||||
// Without suffix
|
||||
"": Byte,
|
||||
"ki": KiByte,
|
||||
"k": KByte,
|
||||
"mi": MiByte,
|
||||
"m": MByte,
|
||||
"gi": GiByte,
|
||||
"g": GByte,
|
||||
"ti": TiByte,
|
||||
"t": TByte,
|
||||
"pi": PiByte,
|
||||
"p": PByte,
|
||||
"ei": EiByte,
|
||||
"e": EByte,
|
||||
}
|
||||
|
||||
func logn(n, b float64) float64 {
|
||||
return math.Log(n) / math.Log(b)
|
||||
}
|
||||
|
||||
func humanateBytes(s uint64, base float64, sizes []string) string {
|
||||
if s < 10 {
|
||||
return fmt.Sprintf("%d B", s)
|
||||
}
|
||||
e := math.Floor(logn(float64(s), base))
|
||||
suffix := sizes[int(e)]
|
||||
val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
|
||||
f := "%.0f %s"
|
||||
if val < 10 {
|
||||
f = "%.1f %s"
|
||||
}
|
||||
|
||||
return fmt.Sprintf(f, val, suffix)
|
||||
}
|
||||
|
||||
// Bytes produces a human readable representation of an SI size.
|
||||
//
|
||||
// See also: ParseBytes.
|
||||
//
|
||||
// Bytes(82854982) -> 83 MB
|
||||
func Bytes(s uint64) string {
|
||||
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
|
||||
return humanateBytes(s, 1000, sizes)
|
||||
}
|
||||
|
||||
// IBytes produces a human readable representation of an IEC size.
|
||||
//
|
||||
// See also: ParseBytes.
|
||||
//
|
||||
// IBytes(82854982) -> 79 MiB
|
||||
func IBytes(s uint64) string {
|
||||
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
|
||||
return humanateBytes(s, 1024, sizes)
|
||||
}
|
||||
|
||||
// ParseBytes parses a string representation of bytes into the number
|
||||
// of bytes it represents.
|
||||
//
|
||||
// See Also: Bytes, IBytes.
|
||||
//
|
||||
// ParseBytes("42 MB") -> 42000000, nil
|
||||
// ParseBytes("42 mib") -> 44040192, nil
|
||||
func ParseBytes(s string) (uint64, error) {
|
||||
lastDigit := 0
|
||||
hasComma := false
|
||||
for _, r := range s {
|
||||
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
||||
break
|
||||
}
|
||||
if r == ',' {
|
||||
hasComma = true
|
||||
}
|
||||
lastDigit++
|
||||
}
|
||||
|
||||
num := s[:lastDigit]
|
||||
if hasComma {
|
||||
num = strings.Replace(num, ",", "", -1)
|
||||
}
|
||||
|
||||
f, err := strconv.ParseFloat(num, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
||||
if m, ok := bytesSizeTable[extra]; ok {
|
||||
f *= float64(m)
|
||||
if f >= math.MaxUint64 {
|
||||
return 0, fmt.Errorf("too large: %v", s)
|
||||
}
|
||||
return uint64(f), nil
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("unhandled size name: %v", extra)
|
||||
}
|
116
vendor/github.com/dustin/go-humanize/comma.go
generated
vendored
116
vendor/github.com/dustin/go-humanize/comma.go
generated
vendored
@ -1,116 +0,0 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Comma produces a string form of the given number in base 10 with
|
||||
// commas after every three orders of magnitude.
|
||||
//
|
||||
// e.g. Comma(834142) -> 834,142
|
||||
func Comma(v int64) string {
|
||||
sign := ""
|
||||
|
||||
// Min int64 can't be negated to a usable value, so it has to be special cased.
|
||||
if v == math.MinInt64 {
|
||||
return "-9,223,372,036,854,775,808"
|
||||
}
|
||||
|
||||
if v < 0 {
|
||||
sign = "-"
|
||||
v = 0 - v
|
||||
}
|
||||
|
||||
parts := []string{"", "", "", "", "", "", ""}
|
||||
j := len(parts) - 1
|
||||
|
||||
for v > 999 {
|
||||
parts[j] = strconv.FormatInt(v%1000, 10)
|
||||
switch len(parts[j]) {
|
||||
case 2:
|
||||
parts[j] = "0" + parts[j]
|
||||
case 1:
|
||||
parts[j] = "00" + parts[j]
|
||||
}
|
||||
v = v / 1000
|
||||
j--
|
||||
}
|
||||
parts[j] = strconv.Itoa(int(v))
|
||||
return sign + strings.Join(parts[j:], ",")
|
||||
}
|
||||
|
||||
// Commaf produces a string form of the given number in base 10 with
|
||||
// commas after every three orders of magnitude.
|
||||
//
|
||||
// e.g. Commaf(834142.32) -> 834,142.32
|
||||
func Commaf(v float64) string {
|
||||
buf := &bytes.Buffer{}
|
||||
if v < 0 {
|
||||
buf.Write([]byte{'-'})
|
||||
v = 0 - v
|
||||
}
|
||||
|
||||
comma := []byte{','}
|
||||
|
||||
parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
|
||||
pos := 0
|
||||
if len(parts[0])%3 != 0 {
|
||||
pos += len(parts[0]) % 3
|
||||
buf.WriteString(parts[0][:pos])
|
||||
buf.Write(comma)
|
||||
}
|
||||
for ; pos < len(parts[0]); pos += 3 {
|
||||
buf.WriteString(parts[0][pos : pos+3])
|
||||
buf.Write(comma)
|
||||
}
|
||||
buf.Truncate(buf.Len() - 1)
|
||||
|
||||
if len(parts) > 1 {
|
||||
buf.Write([]byte{'.'})
|
||||
buf.WriteString(parts[1])
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// CommafWithDigits works like the Commaf but limits the resulting
|
||||
// string to the given number of decimal places.
|
||||
//
|
||||
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
|
||||
func CommafWithDigits(f float64, decimals int) string {
|
||||
return stripTrailingDigits(Commaf(f), decimals)
|
||||
}
|
||||
|
||||
// BigComma produces a string form of the given big.Int in base 10
|
||||
// with commas after every three orders of magnitude.
|
||||
func BigComma(b *big.Int) string {
|
||||
sign := ""
|
||||
if b.Sign() < 0 {
|
||||
sign = "-"
|
||||
b.Abs(b)
|
||||
}
|
||||
|
||||
athousand := big.NewInt(1000)
|
||||
c := (&big.Int{}).Set(b)
|
||||
_, m := oom(c, athousand)
|
||||
parts := make([]string, m+1)
|
||||
j := len(parts) - 1
|
||||
|
||||
mod := &big.Int{}
|
||||
for b.Cmp(athousand) >= 0 {
|
||||
b.DivMod(b, athousand, mod)
|
||||
parts[j] = strconv.FormatInt(mod.Int64(), 10)
|
||||
switch len(parts[j]) {
|
||||
case 2:
|
||||
parts[j] = "0" + parts[j]
|
||||
case 1:
|
||||
parts[j] = "00" + parts[j]
|
||||
}
|
||||
j--
|
||||
}
|
||||
parts[j] = strconv.Itoa(int(b.Int64()))
|
||||
return sign + strings.Join(parts[j:], ",")
|
||||
}
|
40
vendor/github.com/dustin/go-humanize/commaf.go
generated
vendored
40
vendor/github.com/dustin/go-humanize/commaf.go
generated
vendored
@ -1,40 +0,0 @@
|
||||
// +build go1.6
|
||||
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/big"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BigCommaf produces a string form of the given big.Float in base 10
|
||||
// with commas after every three orders of magnitude.
|
||||
func BigCommaf(v *big.Float) string {
|
||||
buf := &bytes.Buffer{}
|
||||
if v.Sign() < 0 {
|
||||
buf.Write([]byte{'-'})
|
||||
v.Abs(v)
|
||||
}
|
||||
|
||||
comma := []byte{','}
|
||||
|
||||
parts := strings.Split(v.Text('f', -1), ".")
|
||||
pos := 0
|
||||
if len(parts[0])%3 != 0 {
|
||||
pos += len(parts[0]) % 3
|
||||
buf.WriteString(parts[0][:pos])
|
||||
buf.Write(comma)
|
||||
}
|
||||
for ; pos < len(parts[0]); pos += 3 {
|
||||
buf.WriteString(parts[0][pos : pos+3])
|
||||
buf.Write(comma)
|
||||
}
|
||||
buf.Truncate(buf.Len() - 1)
|
||||
|
||||
if len(parts) > 1 {
|
||||
buf.Write([]byte{'.'})
|
||||
buf.WriteString(parts[1])
|
||||
}
|
||||
return buf.String()
|
||||
}
|
46
vendor/github.com/dustin/go-humanize/ftoa.go
generated
vendored
46
vendor/github.com/dustin/go-humanize/ftoa.go
generated
vendored
@ -1,46 +0,0 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func stripTrailingZeros(s string) string {
|
||||
offset := len(s) - 1
|
||||
for offset > 0 {
|
||||
if s[offset] == '.' {
|
||||
offset--
|
||||
break
|
||||
}
|
||||
if s[offset] != '0' {
|
||||
break
|
||||
}
|
||||
offset--
|
||||
}
|
||||
return s[:offset+1]
|
||||
}
|
||||
|
||||
func stripTrailingDigits(s string, digits int) string {
|
||||
if i := strings.Index(s, "."); i >= 0 {
|
||||
if digits <= 0 {
|
||||
return s[:i]
|
||||
}
|
||||
i++
|
||||
if i+digits >= len(s) {
|
||||
return s
|
||||
}
|
||||
return s[:i+digits]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Ftoa converts a float to a string with no trailing zeros.
|
||||
func Ftoa(num float64) string {
|
||||
return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
|
||||
}
|
||||
|
||||
// FtoaWithDigits converts a float to a string but limits the resulting string
|
||||
// to the given number of decimal places, and no trailing zeros.
|
||||
func FtoaWithDigits(num float64, digits int) string {
|
||||
return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
|
||||
}
|
8
vendor/github.com/dustin/go-humanize/humanize.go
generated
vendored
8
vendor/github.com/dustin/go-humanize/humanize.go
generated
vendored
@ -1,8 +0,0 @@
|
||||
/*
|
||||
Package humanize converts boring ugly numbers to human-friendly strings and back.
|
||||
|
||||
Durations can be turned into strings such as "3 days ago", numbers
|
||||
representing sizes like 82854982 into useful strings like, "83 MB" or
|
||||
"79 MiB" (whichever you prefer).
|
||||
*/
|
||||
package humanize
|
192
vendor/github.com/dustin/go-humanize/number.go
generated
vendored
192
vendor/github.com/dustin/go-humanize/number.go
generated
vendored
@ -1,192 +0,0 @@
|
||||
package humanize
|
||||
|
||||
/*
|
||||
Slightly adapted from the source to fit go-humanize.
|
||||
|
||||
Author: https://github.com/gorhill
|
||||
Source: https://gist.github.com/gorhill/5285193
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var (
|
||||
renderFloatPrecisionMultipliers = [...]float64{
|
||||
1,
|
||||
10,
|
||||
100,
|
||||
1000,
|
||||
10000,
|
||||
100000,
|
||||
1000000,
|
||||
10000000,
|
||||
100000000,
|
||||
1000000000,
|
||||
}
|
||||
|
||||
renderFloatPrecisionRounders = [...]float64{
|
||||
0.5,
|
||||
0.05,
|
||||
0.005,
|
||||
0.0005,
|
||||
0.00005,
|
||||
0.000005,
|
||||
0.0000005,
|
||||
0.00000005,
|
||||
0.000000005,
|
||||
0.0000000005,
|
||||
}
|
||||
)
|
||||
|
||||
// FormatFloat produces a formatted number as string based on the following user-specified criteria:
|
||||
// * thousands separator
|
||||
// * decimal separator
|
||||
// * decimal precision
|
||||
//
|
||||
// Usage: s := RenderFloat(format, n)
|
||||
// The format parameter tells how to render the number n.
|
||||
//
|
||||
// See examples: http://play.golang.org/p/LXc1Ddm1lJ
|
||||
//
|
||||
// Examples of format strings, given n = 12345.6789:
|
||||
// "#,###.##" => "12,345.67"
|
||||
// "#,###." => "12,345"
|
||||
// "#,###" => "12345,678"
|
||||
// "#\u202F###,##" => "12 345,68"
|
||||
// "#.###,###### => 12.345,678900
|
||||
// "" (aka default format) => 12,345.67
|
||||
//
|
||||
// The highest precision allowed is 9 digits after the decimal symbol.
|
||||
// There is also a version for integer number, FormatInteger(),
|
||||
// which is convenient for calls within template.
|
||||
func FormatFloat(format string, n float64) string {
|
||||
// Special cases:
|
||||
// NaN = "NaN"
|
||||
// +Inf = "+Infinity"
|
||||
// -Inf = "-Infinity"
|
||||
if math.IsNaN(n) {
|
||||
return "NaN"
|
||||
}
|
||||
if n > math.MaxFloat64 {
|
||||
return "Infinity"
|
||||
}
|
||||
if n < -math.MaxFloat64 {
|
||||
return "-Infinity"
|
||||
}
|
||||
|
||||
// default format
|
||||
precision := 2
|
||||
decimalStr := "."
|
||||
thousandStr := ","
|
||||
positiveStr := ""
|
||||
negativeStr := "-"
|
||||
|
||||
if len(format) > 0 {
|
||||
format := []rune(format)
|
||||
|
||||
// If there is an explicit format directive,
|
||||
// then default values are these:
|
||||
precision = 9
|
||||
thousandStr = ""
|
||||
|
||||
// collect indices of meaningful formatting directives
|
||||
formatIndx := []int{}
|
||||
for i, char := range format {
|
||||
if char != '#' && char != '0' {
|
||||
formatIndx = append(formatIndx, i)
|
||||
}
|
||||
}
|
||||
|
||||
if len(formatIndx) > 0 {
|
||||
// Directive at index 0:
|
||||
// Must be a '+'
|
||||
// Raise an error if not the case
|
||||
// index: 0123456789
|
||||
// +0.000,000
|
||||
// +000,000.0
|
||||
// +0000.00
|
||||
// +0000
|
||||
if formatIndx[0] == 0 {
|
||||
if format[formatIndx[0]] != '+' {
|
||||
panic("RenderFloat(): invalid positive sign directive")
|
||||
}
|
||||
positiveStr = "+"
|
||||
formatIndx = formatIndx[1:]
|
||||
}
|
||||
|
||||
// Two directives:
|
||||
// First is thousands separator
|
||||
// Raise an error if not followed by 3-digit
|
||||
// 0123456789
|
||||
// 0.000,000
|
||||
// 000,000.00
|
||||
if len(formatIndx) == 2 {
|
||||
if (formatIndx[1] - formatIndx[0]) != 4 {
|
||||
panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
|
||||
}
|
||||
thousandStr = string(format[formatIndx[0]])
|
||||
formatIndx = formatIndx[1:]
|
||||
}
|
||||
|
||||
// One directive:
|
||||
// Directive is decimal separator
|
||||
// The number of digit-specifier following the separator indicates wanted precision
|
||||
// 0123456789
|
||||
// 0.00
|
||||
// 000,0000
|
||||
if len(formatIndx) == 1 {
|
||||
decimalStr = string(format[formatIndx[0]])
|
||||
precision = len(format) - formatIndx[0] - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// generate sign part
|
||||
var signStr string
|
||||
if n >= 0.000000001 {
|
||||
signStr = positiveStr
|
||||
} else if n <= -0.000000001 {
|
||||
signStr = negativeStr
|
||||
n = -n
|
||||
} else {
|
||||
signStr = ""
|
||||
n = 0.0
|
||||
}
|
||||
|
||||
// split number into integer and fractional parts
|
||||
intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
|
||||
|
||||
// generate integer part string
|
||||
intStr := strconv.FormatInt(int64(intf), 10)
|
||||
|
||||
// add thousand separator if required
|
||||
if len(thousandStr) > 0 {
|
||||
for i := len(intStr); i > 3; {
|
||||
i -= 3
|
||||
intStr = intStr[:i] + thousandStr + intStr[i:]
|
||||
}
|
||||
}
|
||||
|
||||
// no fractional part, we can leave now
|
||||
if precision == 0 {
|
||||
return signStr + intStr
|
||||
}
|
||||
|
||||
// generate fractional part
|
||||
fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
|
||||
// may need padding
|
||||
if len(fracStr) < precision {
|
||||
fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
|
||||
}
|
||||
|
||||
return signStr + intStr + decimalStr + fracStr
|
||||
}
|
||||
|
||||
// FormatInteger produces a formatted number as string.
|
||||
// See FormatFloat.
|
||||
func FormatInteger(format string, n int) string {
|
||||
return FormatFloat(format, float64(n))
|
||||
}
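For reference, a minimal usage sketch of the directive syntax described in the comments above, assuming the package is imported from its vendored path github.com/dustin/go-humanize and that the rounding helpers defined elsewhere in number.go are present:

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	// "#,###.##": ',' is the thousands separator and '.' the decimal
	// separator, followed by two digit-specifiers (precision 2).
	fmt.Println(humanize.FormatFloat("#,###.##", 12345.6789)) // 12,345.68

	// An empty format string uses the defaults from the function body:
	// precision 2, "." as decimal symbol, "," as thousands separator.
	fmt.Println(humanize.FormatFloat("", 12345.6789)) // 12,345.68

	// FormatInteger is the integer convenience wrapper mentioned above.
	fmt.Println(humanize.FormatInteger("#,###.", 12345)) // 12,345
}
```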
|
25
vendor/github.com/dustin/go-humanize/ordinals.go
generated
vendored
@ -1,25 +0,0 @@
|
||||
package humanize
|
||||
|
||||
import "strconv"
|
||||
|
||||
// Ordinal gives you the input number in a rank/ordinal format.
|
||||
//
|
||||
// Ordinal(3) -> 3rd
|
||||
func Ordinal(x int) string {
|
||||
suffix := "th"
|
||||
switch x % 10 {
|
||||
case 1:
|
||||
if x%100 != 11 {
|
||||
suffix = "st"
|
||||
}
|
||||
case 2:
|
||||
if x%100 != 12 {
|
||||
suffix = "nd"
|
||||
}
|
||||
case 3:
|
||||
if x%100 != 13 {
|
||||
suffix = "rd"
|
||||
}
|
||||
}
|
||||
return strconv.Itoa(x) + suffix
|
||||
}
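A short sketch of the ordinal suffix rules implemented above; the 11/12/13 exceptions are handled by the x%100 checks:

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.Ordinal(1))   // 1st
	fmt.Println(humanize.Ordinal(11))  // 11th (exception to the %10 == 1 rule)
	fmt.Println(humanize.Ordinal(22))  // 22nd
	fmt.Println(humanize.Ordinal(113)) // 113th
}
```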
|
123
vendor/github.com/dustin/go-humanize/si.go
generated
vendored
@ -1,123 +0,0 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var siPrefixTable = map[float64]string{
|
||||
-24: "y", // yocto
|
||||
-21: "z", // zepto
|
||||
-18: "a", // atto
|
||||
-15: "f", // femto
|
||||
-12: "p", // pico
|
||||
-9: "n", // nano
|
||||
-6: "µ", // micro
|
||||
-3: "m", // milli
|
||||
0: "",
|
||||
3: "k", // kilo
|
||||
6: "M", // mega
|
||||
9: "G", // giga
|
||||
12: "T", // tera
|
||||
15: "P", // peta
|
||||
18: "E", // exa
|
||||
21: "Z", // zetta
|
||||
24: "Y", // yotta
|
||||
}
|
||||
|
||||
var revSIPrefixTable = revfmap(siPrefixTable)
|
||||
|
||||
// revfmap reverses the map and precomputes the power multiplier
|
||||
func revfmap(in map[float64]string) map[string]float64 {
|
||||
rv := map[string]float64{}
|
||||
for k, v := range in {
|
||||
rv[v] = math.Pow(10, k)
|
||||
}
|
||||
return rv
|
||||
}
|
||||
|
||||
var riParseRegex *regexp.Regexp
|
||||
|
||||
func init() {
|
||||
ri := `^([\-0-9.]+)\s?([`
|
||||
for _, v := range siPrefixTable {
|
||||
ri += v
|
||||
}
|
||||
ri += `]?)(.*)`
|
||||
|
||||
riParseRegex = regexp.MustCompile(ri)
|
||||
}
|
||||
|
||||
// ComputeSI finds the most appropriate SI prefix for the given number
|
||||
// and returns the prefix along with the value adjusted to be within
|
||||
// that prefix.
|
||||
//
|
||||
// See also: SI, ParseSI.
|
||||
//
|
||||
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
|
||||
func ComputeSI(input float64) (float64, string) {
|
||||
if input == 0 {
|
||||
return 0, ""
|
||||
}
|
||||
mag := math.Abs(input)
|
||||
exponent := math.Floor(logn(mag, 10))
|
||||
exponent = math.Floor(exponent/3) * 3
|
||||
|
||||
value := mag / math.Pow(10, exponent)
|
||||
|
||||
// Handle special case where value is exactly 1000.0
|
||||
// Should return 1 M instead of 1000 k
|
||||
if value == 1000.0 {
|
||||
exponent += 3
|
||||
value = mag / math.Pow(10, exponent)
|
||||
}
|
||||
|
||||
value = math.Copysign(value, input)
|
||||
|
||||
prefix := siPrefixTable[exponent]
|
||||
return value, prefix
|
||||
}
|
||||
|
||||
// SI returns a string with default formatting.
|
||||
//
|
||||
// SI uses Ftoa to format float value, removing trailing zeros.
|
||||
//
|
||||
// See also: ComputeSI, ParseSI.
|
||||
//
|
||||
// e.g. SI(1000000, "B") -> 1 MB
|
||||
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
|
||||
func SI(input float64, unit string) string {
|
||||
value, prefix := ComputeSI(input)
|
||||
return Ftoa(value) + " " + prefix + unit
|
||||
}
|
||||
|
||||
// SIWithDigits works like SI but limits the resulting string to the
|
||||
// given number of decimal places.
|
||||
//
|
||||
// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
|
||||
// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
|
||||
func SIWithDigits(input float64, decimals int, unit string) string {
|
||||
value, prefix := ComputeSI(input)
|
||||
return FtoaWithDigits(value, decimals) + " " + prefix + unit
|
||||
}
|
||||
|
||||
var errInvalid = errors.New("invalid input")
|
||||
|
||||
// ParseSI parses an SI string back into the number and unit.
|
||||
//
|
||||
// See also: SI, ComputeSI.
|
||||
//
|
||||
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
|
||||
func ParseSI(input string) (float64, string, error) {
|
||||
found := riParseRegex.FindStringSubmatch(input)
|
||||
if len(found) != 4 {
|
||||
return 0, "", errInvalid
|
||||
}
|
||||
mag := revSIPrefixTable[found[2]]
|
||||
unit := found[3]
|
||||
|
||||
base, err := strconv.ParseFloat(found[1], 64)
|
||||
return base * mag, unit, err
|
||||
}
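As a rough sketch of how the three SI helpers above fit together (output values follow the prefix table and regular expression defined in this file; helpers such as Ftoa and logn live in other files of the package):

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	// ComputeSI picks the prefix and rescales the value.
	v, prefix := humanize.ComputeSI(2.2345e-12)
	fmt.Println(v, prefix) // 2.2345 p

	// SI formats the rescaled value together with a unit.
	fmt.Println(humanize.SI(1000000, "B")) // 1 MB

	// ParseSI reverses the operation using revSIPrefixTable.
	n, unit, err := humanize.ParseSI("2.2345 pF")
	fmt.Println(n, unit, err) // 2.2345e-12 F <nil>
}
```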
|
117
vendor/github.com/dustin/go-humanize/times.go
generated
vendored
@ -1,117 +0,0 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Seconds-based time units
|
||||
const (
|
||||
Day = 24 * time.Hour
|
||||
Week = 7 * Day
|
||||
Month = 30 * Day
|
||||
Year = 12 * Month
|
||||
LongTime = 37 * Year
|
||||
)
|
||||
|
||||
// Time formats a time into a relative string.
|
||||
//
|
||||
// Time(someT) -> "3 weeks ago"
|
||||
func Time(then time.Time) string {
|
||||
return RelTime(then, time.Now(), "ago", "from now")
|
||||
}
|
||||
|
||||
// A RelTimeMagnitude struct contains a relative time point at which
|
||||
// the relative format of time will switch to a new format string. A
|
||||
// slice of these in ascending order by their "D" field is passed to
|
||||
// CustomRelTime to format durations.
|
||||
//
|
||||
// The Format field is a string that may contain a "%s" which will be
|
||||
// replaced with the appropriate signed label (e.g. "ago" or "from
|
||||
// now") and a "%d" that will be replaced by the quantity.
|
||||
//
|
||||
// The DivBy field is the amount of time the time difference must be
|
||||
// divided by in order to display correctly.
|
||||
//
|
||||
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
|
||||
// DivBy should be time.Minute so whatever the duration is will be
|
||||
// expressed in minutes.
|
||||
type RelTimeMagnitude struct {
|
||||
D time.Duration
|
||||
Format string
|
||||
DivBy time.Duration
|
||||
}
|
||||
|
||||
var defaultMagnitudes = []RelTimeMagnitude{
|
||||
{time.Second, "now", time.Second},
|
||||
{2 * time.Second, "1 second %s", 1},
|
||||
{time.Minute, "%d seconds %s", time.Second},
|
||||
{2 * time.Minute, "1 minute %s", 1},
|
||||
{time.Hour, "%d minutes %s", time.Minute},
|
||||
{2 * time.Hour, "1 hour %s", 1},
|
||||
{Day, "%d hours %s", time.Hour},
|
||||
{2 * Day, "1 day %s", 1},
|
||||
{Week, "%d days %s", Day},
|
||||
{2 * Week, "1 week %s", 1},
|
||||
{Month, "%d weeks %s", Week},
|
||||
{2 * Month, "1 month %s", 1},
|
||||
{Year, "%d months %s", Month},
|
||||
{18 * Month, "1 year %s", 1},
|
||||
{2 * Year, "2 years %s", 1},
|
||||
{LongTime, "%d years %s", Year},
|
||||
{math.MaxInt64, "a long while %s", 1},
|
||||
}
|
||||
|
||||
// RelTime formats a time into a relative string.
|
||||
//
|
||||
// It takes two times and two labels. In addition to the generic time
|
||||
// delta string (e.g. 5 minutes), the labels are applied so that
// the label corresponding to the smaller time is used.
|
||||
//
|
||||
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
|
||||
func RelTime(a, b time.Time, albl, blbl string) string {
|
||||
return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
|
||||
}
|
||||
|
||||
// CustomRelTime formats a time into a relative string.
|
||||
//
|
||||
// It takes two times, two labels and a table of relative time formats.
// In addition to the generic time delta string (e.g. 5 minutes), the
// labels are applied so that the label corresponding to the
// smaller time is used.
|
||||
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
|
||||
lbl := albl
|
||||
diff := b.Sub(a)
|
||||
|
||||
if a.After(b) {
|
||||
lbl = blbl
|
||||
diff = a.Sub(b)
|
||||
}
|
||||
|
||||
n := sort.Search(len(magnitudes), func(i int) bool {
|
||||
return magnitudes[i].D > diff
|
||||
})
|
||||
|
||||
if n >= len(magnitudes) {
|
||||
n = len(magnitudes) - 1
|
||||
}
|
||||
mag := magnitudes[n]
|
||||
args := []interface{}{}
|
||||
escaped := false
|
||||
for _, ch := range mag.Format {
|
||||
if escaped {
|
||||
switch ch {
|
||||
case 's':
|
||||
args = append(args, lbl)
|
||||
case 'd':
|
||||
args = append(args, diff/mag.DivBy)
|
||||
}
|
||||
escaped = false
|
||||
} else {
|
||||
escaped = ch == '%'
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf(mag.Format, args...)
|
||||
}
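A minimal sketch of Time and RelTime against the default magnitude table above; the exact wording depends on the magnitudes slice, and these values fall into the "%d weeks %s" bucket:

```go
package main

import (
	"fmt"
	"time"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	// Relative to time.Now(), using the default "ago"/"from now" labels.
	threeWeeksAgo := time.Now().Add(-3 * humanize.Week)
	fmt.Println(humanize.Time(threeWeeksAgo)) // 3 weeks ago

	// Explicit reference time and custom labels, as in the doc comment.
	past := time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC)
	future := past.Add(3 * humanize.Week)
	fmt.Println(humanize.RelTime(past, future, "earlier", "later")) // 3 weeks earlier
}
```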
|
0
vendor/github.com/ncw/swift/travis_realserver.sh
generated
vendored
Executable file → Normal file
0
vendor/github.com/pkg/xattr/.travis.sh
generated
vendored
Executable file → Normal file
1
vendor/github.com/sirupsen/logrus/.gitignore
generated
vendored
@ -1 +0,0 @@
|
||||
logrus
|
13
vendor/github.com/sirupsen/logrus/.travis.yml
generated
vendored
@ -1,13 +0,0 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
env:
|
||||
- GOMAXPROCS=4 GORACE=halt_on_error=1
|
||||
install:
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get gopkg.in/gemnasium/logrus-airbrake-hook.v2
|
||||
- go get golang.org/x/sys/unix
|
||||
- go get golang.org/x/sys/windows
|
||||
script:
|
||||
- go test -race -v ./...
|
123
vendor/github.com/sirupsen/logrus/CHANGELOG.md
generated
vendored
@ -1,123 +0,0 @@
|
||||
# 1.0.5
|
||||
|
||||
* Fix hooks race (#707)
|
||||
* Fix panic deadlock (#695)
|
||||
|
||||
# 1.0.4
|
||||
|
||||
* Fix race when adding hooks (#612)
|
||||
* Fix terminal check in AppEngine (#635)
|
||||
|
||||
# 1.0.3
|
||||
|
||||
* Replace example files with testable examples
|
||||
|
||||
# 1.0.2
|
||||
|
||||
* bug: quote non-string values in text formatter (#583)
|
||||
* Make (*Logger) SetLevel a public method
|
||||
|
||||
# 1.0.1
|
||||
|
||||
* bug: fix escaping in text formatter (#575)
|
||||
|
||||
# 1.0.0
|
||||
|
||||
* Officially changed name to lower-case
|
||||
* bug: colors on Windows 10 (#541)
|
||||
* bug: fix race in accessing level (#512)
|
||||
|
||||
# 0.11.5
|
||||
|
||||
* feature: add writer and writerlevel to entry (#372)
|
||||
|
||||
# 0.11.4
|
||||
|
||||
* bug: fix undefined variable on solaris (#493)
|
||||
|
||||
# 0.11.3
|
||||
|
||||
* formatter: configure quoting of empty values (#484)
|
||||
* formatter: configure quoting character (default is `"`) (#484)
|
||||
* bug: fix not importing io correctly in non-linux environments (#481)
|
||||
|
||||
# 0.11.2
|
||||
|
||||
* bug: fix windows terminal detection (#476)
|
||||
|
||||
# 0.11.1
|
||||
|
||||
* bug: fix tty detection with custom out (#471)
|
||||
|
||||
# 0.11.0
|
||||
|
||||
* performance: Use bufferpool to allocate (#370)
|
||||
* terminal: terminal detection for app-engine (#343)
|
||||
* feature: exit handler (#375)
|
||||
|
||||
# 0.10.0
|
||||
|
||||
* feature: Add a test hook (#180)
|
||||
* feature: `ParseLevel` is now case-insensitive (#326)
|
||||
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
|
||||
* performance: avoid re-allocations on `WithFields` (#335)
|
||||
|
||||
# 0.9.0
|
||||
|
||||
* logrus/text_formatter: don't emit empty msg
|
||||
* logrus/hooks/airbrake: move out of main repository
|
||||
* logrus/hooks/sentry: move out of main repository
|
||||
* logrus/hooks/papertrail: move out of main repository
|
||||
* logrus/hooks/bugsnag: move out of main repository
|
||||
* logrus/core: run tests with `-race`
|
||||
* logrus/core: detect TTY based on `stderr`
|
||||
* logrus/core: support `WithError` on logger
|
||||
* logrus/core: Solaris support
|
||||
|
||||
# 0.8.7
|
||||
|
||||
* logrus/core: fix possible race (#216)
|
||||
* logrus/doc: small typo fixes and doc improvements
|
||||
|
||||
|
||||
# 0.8.6
|
||||
|
||||
* hooks/raven: allow passing an initialized client
|
||||
|
||||
# 0.8.5
|
||||
|
||||
* logrus/core: revert #208
|
||||
|
||||
# 0.8.4
|
||||
|
||||
* formatter/text: fix data race (#218)
|
||||
|
||||
# 0.8.3
|
||||
|
||||
* logrus/core: fix entry log level (#208)
|
||||
* logrus/core: improve performance of text formatter by 40%
|
||||
* logrus/core: expose `LevelHooks` type
|
||||
* logrus/core: add support for DragonflyBSD and NetBSD
|
||||
* formatter/text: print structs more verbosely
|
||||
|
||||
# 0.8.2
|
||||
|
||||
* logrus: fix more Fatal family functions
|
||||
|
||||
# 0.8.1
|
||||
|
||||
* logrus: fix not exiting on `Fatalf` and `Fatalln`
|
||||
|
||||
# 0.8.0
|
||||
|
||||
* logrus: defaults to stderr instead of stdout
|
||||
* hooks/sentry: add special field for `*http.Request`
|
||||
* formatter/text: ignore Windows for colors
|
||||
|
||||
# 0.7.3
|
||||
|
||||
* formatter/\*: allow configuration of timestamp layout
|
||||
|
||||
# 0.7.2
|
||||
|
||||
* formatter/text: Add configuration option for time format (#158)
|
21
vendor/github.com/sirupsen/logrus/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Simon Eskildsen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
461
vendor/github.com/sirupsen/logrus/README.md
generated
vendored
@ -1,461 +0,0 @@
|
||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
|
||||
|
||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||
the standard library logger.
|
||||
|
||||
**Seeing weird case-sensitive problems?** In the past it has been possible to
|
||||
import Logrus as both upper- and lower-case. Due to the Go package environment,
|
||||
this caused issues in the community and we needed a standard. Some environments
|
||||
experienced problems with the upper-case variant, so the lower-case was decided.
|
||||
Everything using `logrus` will need to use the lower-case:
|
||||
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
|
||||
|
||||
To fix Glide, see [these
|
||||
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
|
||||
For an in-depth explanation of the casing issue, see [this
|
||||
comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
|
||||
|
||||
**Are you interested in assisting in maintaining Logrus?** Currently I have a
|
||||
lot of obligations, and I am unable to provide Logrus with the maintainership it
|
||||
needs. If you'd like to help, please reach out to me at `simon at author's
|
||||
username dot com`.
|
||||
|
||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||
plain text):
|
||||
|
||||
![Colored](http://i.imgur.com/PY7qMwd.png)
|
||||
|
||||
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
|
||||
or Splunk:
|
||||
|
||||
```json
|
||||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||
|
||||
{"level":"warning","msg":"The group's number increased tremendously!",
|
||||
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
||||
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
||||
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
||||
|
||||
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
||||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||
```
|
||||
|
||||
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
|
||||
attached, the output is compatible with the
|
||||
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
||||
|
||||
```text
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
||||
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
||||
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
||||
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
||||
exit status 1
|
||||
```
|
||||
|
||||
#### Case-sensitivity
|
||||
|
||||
The organization's name was changed to lower-case--and this will not be changed
|
||||
back. If you are getting import conflicts due to case sensitivity, please use
|
||||
the lower-case import: `github.com/sirupsen/logrus`.
|
||||
|
||||
#### Example
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
```
|
||||
|
||||
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
|
||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||
want:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Log as JSON instead of the default ASCII formatter.
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
|
||||
// Output to stdout instead of the default stderr
|
||||
// Can be any io.Writer, see below for File example
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
// Only log the warning severity or above.
|
||||
log.SetLevel(log.WarnLevel)
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
|
||||
// A common pattern is to re-use fields between logging statements by re-using
|
||||
// the logrus.Entry returned from WithFields()
|
||||
contextLogger := log.WithFields(log.Fields{
|
||||
"common": "this is a common field",
|
||||
"other": "I also should be logged always",
|
||||
})
|
||||
|
||||
contextLogger.Info("I'll be logged with common and other field")
|
||||
contextLogger.Info("Me too")
|
||||
}
|
||||
```
|
||||
|
||||
For more advanced usage such as logging to multiple locations from the same
|
||||
application, you can also create an instance of the `logrus` Logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Create a new instance of the logger. You can have any number of instances.
|
||||
var log = logrus.New()
|
||||
|
||||
func main() {
|
||||
// The API for setting attributes is a little different than the package level
|
||||
// exported logger. See Godoc.
|
||||
log.Out = os.Stdout
|
||||
|
||||
// You could set this to any `io.Writer` such as a file
|
||||
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
|
||||
// if err == nil {
|
||||
// log.Out = file
|
||||
// } else {
|
||||
// log.Info("Failed to log to file, using default stderr")
|
||||
// }
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
}
|
||||
```
|
||||
|
||||
#### Fields
|
||||
|
||||
Logrus encourages careful, structured logging through logging fields instead of
|
||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||
to send event %s to topic %s with key %d")`, you should log the much more
|
||||
discoverable:
|
||||
|
||||
```go
|
||||
log.WithFields(log.Fields{
|
||||
"event": event,
|
||||
"topic": topic,
|
||||
"key": key,
|
||||
}).Fatal("Failed to send event")
|
||||
```
|
||||
|
||||
We've found this API forces you to think about logging in a way that produces
|
||||
much more useful logging messages. We've been in countless situations where just
|
||||
a single added field to a log statement that was already there would've saved us
|
||||
hours. The `WithFields` call is optional.
|
||||
|
||||
In general, with Logrus using any of the `printf`-family functions should be
|
||||
seen as a hint you should add a field, however, you can still use the
|
||||
`printf`-family functions with Logrus.
|
||||
|
||||
#### Default Fields
|
||||
|
||||
Often it's helpful to have fields _always_ attached to log statements in an
|
||||
application or parts of one. For example, you may want to always log the
|
||||
`request_id` and `user_ip` in the context of a request. Instead of writing
|
||||
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
|
||||
every line, you can create a `logrus.Entry` to pass around instead:
|
||||
|
||||
```go
|
||||
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
|
||||
requestLogger.Info("something happened on that request") // will log request_id and user_ip
|
||||
requestLogger.Warn("something not great happened")
|
||||
```
|
||||
|
||||
#### Hooks
|
||||
|
||||
You can add hooks for logging levels. For example to send errors to an exception
|
||||
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
|
||||
multiple places simultaneously, e.g. syslog.
|
||||
|
||||
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
||||
`init`:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
|
||||
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
// Use the Airbrake hook to report errors that have Error severity or above to
|
||||
// an exception tracker. You can create custom hooks, see the Hooks section.
|
||||
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
|
||||
|
||||
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
if err != nil {
|
||||
log.Error("Unable to connect to local syslog daemon")
|
||||
} else {
|
||||
log.AddHook(hook)
|
||||
}
|
||||
}
|
||||
```
|
||||
Note: The syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
|
||||
|
||||
A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
|
||||
|
||||
|
||||
#### Level logging
|
||||
|
||||
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
|
||||
|
||||
```go
|
||||
log.Debug("Useful debugging information.")
|
||||
log.Info("Something noteworthy happened!")
|
||||
log.Warn("You should probably take a look at this.")
|
||||
log.Error("Something failed but I'm not quitting.")
|
||||
// Calls os.Exit(1) after logging
|
||||
log.Fatal("Bye.")
|
||||
// Calls panic() after logging
|
||||
log.Panic("I'm bailing.")
|
||||
```
|
||||
|
||||
You can set the logging level on a `Logger`, then it will only log entries with
|
||||
that severity or anything above it:
|
||||
|
||||
```go
|
||||
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
||||
log.SetLevel(log.InfoLevel)
|
||||
```
|
||||
|
||||
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
||||
environment if your application has that.
|
||||
|
||||
#### Entries
|
||||
|
||||
Besides the fields added with `WithField` or `WithFields` some fields are
|
||||
automatically added to all logging events:
|
||||
|
||||
1. `time`. The timestamp when the entry was created.
|
||||
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
||||
the `AddFields` call. E.g. `Failed to send event.`
|
||||
3. `level`. The logging level. E.g. `info`.
|
||||
|
||||
#### Environments
|
||||
|
||||
Logrus has no notion of environment.
|
||||
|
||||
If you wish for hooks and formatters to only be used in specific environments,
|
||||
you should handle that yourself. For example, if your application has a global
|
||||
variable `Environment`, which is a string representation of the environment you
|
||||
could do:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// do something here to set environment depending on an environment variable
|
||||
// or command-line flag
|
||||
if Environment == "production" {
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
} else {
|
||||
// The TextFormatter is default, you don't actually have to do this.
|
||||
log.SetFormatter(&log.TextFormatter{})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This configuration is how `logrus` was intended to be used, but JSON in
|
||||
production is mostly only useful if you do log aggregation with tools like
|
||||
Splunk or Logstash.
|
||||
|
||||
#### Formatters
|
||||
|
||||
The built-in logging formatters are:
|
||||
|
||||
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
||||
without colors.
|
||||
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
||||
field to `true`. To force no colored output even if there is a TTY set the
|
||||
`DisableColors` field to `true`. For Windows, see
|
||||
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
|
||||
* When colors are enabled, levels are truncated to 4 characters by default. To disable
|
||||
truncation set the `DisableLevelTruncation` field to `true`.
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
|
||||
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
|
||||
|
||||
Third party logging formatters:
|
||||
|
||||
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
|
||||
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
|
||||
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
||||
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||
|
||||
You can define your formatter by implementing the `Formatter` interface,
|
||||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
||||
default ones (see Entries section above):
|
||||
|
||||
```go
|
||||
type MyJSONFormatter struct {
|
||||
}
|
||||
|
||||
log.SetFormatter(new(MyJSONFormatter))
|
||||
|
||||
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
// Note this doesn't include Time, Level and Message which are available on
|
||||
// the Entry. Consult `godoc` on information about those fields or read the
|
||||
// source of the official loggers.
|
||||
serialized, err := json.Marshal(entry.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
```
|
||||
|
||||
#### Logger as an `io.Writer`
|
||||
|
||||
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
||||
|
||||
```go
|
||||
w := logger.Writer()
|
||||
defer w.Close()
|
||||
|
||||
srv := http.Server{
|
||||
// create a stdlib log.Logger that writes to
|
||||
// logrus.Logger.
|
||||
ErrorLog: log.New(w, "", 0),
|
||||
}
|
||||
```
|
||||
|
||||
Each line written to that writer will be printed the usual way, using formatters
|
||||
and hooks. The level for those entries is `info`.
|
||||
|
||||
This means that we can override the standard library logger easily:
|
||||
|
||||
```go
|
||||
logger := logrus.New()
|
||||
logger.Formatter = &logrus.JSONFormatter{}
|
||||
|
||||
// Use logrus for standard log output
|
||||
// Note that `log` here references stdlib's log
|
||||
// Not logrus imported under the name `log`.
|
||||
log.SetOutput(logger.Writer())
|
||||
```
|
||||
|
||||
#### Rotation
|
||||
|
||||
Log rotation is not provided with Logrus. Log rotation should be done by an
|
||||
external program (like `logrotate(8)`) that can compress and delete old log
|
||||
entries. It should not be a feature of the application-level logger.
|
||||
|
||||
#### Tools
|
||||
|
||||
| Tool | Description |
|
||||
| ---- | ----------- |
|
||||
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can set a logger's level, hook and formatter via a config file, so the logger is generated with a different config for each environment.|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
|
||||
|
||||
#### Testing
|
||||
|
||||
Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
|
||||
|
||||
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
|
||||
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
|
||||
|
||||
```go
|
||||
import(
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSomething(t *testing.T) {
|
||||
logger, hook := test.NewNullLogger()
|
||||
logger.Error("Helloerror")
|
||||
|
||||
assert.Equal(t, 1, len(hook.Entries))
|
||||
assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
|
||||
assert.Equal(t, "Helloerror", hook.LastEntry().Message)
|
||||
|
||||
hook.Reset()
|
||||
assert.Nil(t, hook.LastEntry())
|
||||
}
|
||||
```
|
||||
|
||||
#### Fatal handlers
|
||||
|
||||
Logrus can register one or more functions that will be called when any `fatal`
|
||||
level message is logged. The registered handlers will be executed before
|
||||
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
|
||||
|
||||
```
|
||||
...
|
||||
handler := func() {
|
||||
// gracefully shutdown something...
|
||||
}
|
||||
logrus.RegisterExitHandler(handler)
|
||||
...
|
||||
```
|
||||
|
||||
#### Thread safety
|
||||
|
||||
By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
|
||||
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
|
||||
|
||||
Situations when locking is not needed include:
|
||||
|
||||
* You have no hooks registered, or hooks calling is already thread-safe.
|
||||
|
||||
* Writing to logger.Out is already thread-safe, for example:
|
||||
|
||||
1) logger.Out is protected by locks.
|
||||
|
||||
2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
|
||||
|
||||
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
|
64
vendor/github.com/sirupsen/logrus/alt_exit.go
generated
vendored
@ -1,64 +0,0 @@
|
||||
package logrus
|
||||
|
||||
// The following code was sourced and modified from the
|
||||
// https://github.com/tebeka/atexit package governed by the following license:
|
||||
//
|
||||
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
// subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
var handlers = []func(){}
|
||||
|
||||
func runHandler(handler func()) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
|
||||
}
|
||||
}()
|
||||
|
||||
handler()
|
||||
}
|
||||
|
||||
func runHandlers() {
|
||||
for _, handler := range handlers {
|
||||
runHandler(handler)
|
||||
}
|
||||
}
|
||||
|
||||
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
|
||||
func Exit(code int) {
|
||||
runHandlers()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
|
||||
// all handlers. The handlers will also be invoked when any Fatal log entry is
|
||||
// made.
|
||||
//
|
||||
// This method is useful when a caller wishes to use logrus to log a fatal
|
||||
// message but also needs to shut down gracefully. An example use case could be
// closing database connections, or sending an alert that the application is
// closing.
|
||||
func RegisterExitHandler(handler func()) {
|
||||
handlers = append(handlers, handler)
|
||||
}
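A small sketch of the exit-handler flow described above, assuming logrus is imported under its usual lower-case path; Fatal logs the entry, runs the registered handlers, then calls os.Exit(1):

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// The handler runs before the process exits via logrus.Exit/os.Exit.
	log.RegisterExitHandler(func() {
		log.Info("closing database connections")
	})

	// Fatal logs the message and then triggers the registered handlers.
	log.Fatal("unrecoverable error")
}
```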
|
14
vendor/github.com/sirupsen/logrus/appveyor.yml
generated
vendored
@ -1,14 +0,0 @@
|
||||
version: "{build}"
|
||||
platform: x64
|
||||
clone_folder: c:\gopath\src\github.com\sirupsen\logrus
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
install:
|
||||
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
|
||||
- go version
|
||||
build_script:
|
||||
- go get -t
|
||||
- go test
|
26
vendor/github.com/sirupsen/logrus/doc.go
generated
vendored
@ -1,26 +0,0 @@
|
||||
/*
|
||||
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
|
||||
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 1,
|
||||
"size": 10,
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
|
||||
Output:
|
||||
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
||||
|
||||
For a full guide visit https://github.com/sirupsen/logrus
|
||||
*/
|
||||
package logrus
|
300
vendor/github.com/sirupsen/logrus/entry.go
generated
vendored
@ -1,300 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var bufferPool *sync.Pool
|
||||
|
||||
func init() {
|
||||
bufferPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Defines the key when adding errors using WithError.
|
||||
var ErrorKey = "error"
|
||||
|
||||
// An entry is the final or intermediate Logrus logging entry. It contains all
|
||||
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
|
||||
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
|
||||
// passed around as much as you wish to avoid field duplication.
|
||||
type Entry struct {
|
||||
Logger *Logger
|
||||
|
||||
// Contains all the fields set by the user.
|
||||
Data Fields
|
||||
|
||||
// Time at which the log entry was created
|
||||
Time time.Time
|
||||
|
||||
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
||||
// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
|
||||
Level Level
|
||||
|
||||
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
||||
Message string
|
||||
|
||||
// When formatter is called in entry.log(), a Buffer may be set to the entry
|
||||
Buffer *bytes.Buffer
|
||||
}
|
||||
|
||||
func NewEntry(logger *Logger) *Entry {
|
||||
return &Entry{
|
||||
Logger: logger,
|
||||
// Default is five fields, give a little extra room
|
||||
Data: make(Fields, 5),
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the string representation from the reader and ultimately the
|
||||
// formatter.
|
||||
func (entry *Entry) String() (string, error) {
|
||||
serialized, err := entry.Logger.Formatter.Format(entry)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
str := string(serialized)
|
||||
return str, nil
|
||||
}
|
||||
|
||||
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
|
||||
func (entry *Entry) WithError(err error) *Entry {
|
||||
return entry.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// Add a single field to the Entry.
|
||||
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
||||
return entry.WithFields(Fields{key: value})
|
||||
}
|
||||
|
||||
// Add a map of fields to the Entry.
|
||||
func (entry *Entry) WithFields(fields Fields) *Entry {
|
||||
data := make(Fields, len(entry.Data)+len(fields))
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
for k, v := range fields {
|
||||
data[k] = v
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time}
|
||||
}
|
||||
|
||||
// Overrides the time of the Entry.
|
||||
func (entry *Entry) WithTime(t time.Time) *Entry {
|
||||
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
|
||||
}
|
||||
|
||||
// This function is not declared with a pointer value because otherwise
|
||||
// race conditions will occur when using multiple goroutines
|
||||
func (entry Entry) log(level Level, msg string) {
|
||||
var buffer *bytes.Buffer
|
||||
|
||||
// Default to now, but allow users to override if they want.
|
||||
//
|
||||
// We don't have to worry about polluting future calls to Entry#log()
|
||||
// with this assignment because this function is declared with a
|
||||
// non-pointer receiver.
|
||||
if entry.Time.IsZero() {
|
||||
entry.Time = time.Now()
|
||||
}
|
||||
|
||||
entry.Level = level
|
||||
entry.Message = msg
|
||||
|
||||
entry.fireHooks()
|
||||
|
||||
buffer = bufferPool.Get().(*bytes.Buffer)
|
||||
buffer.Reset()
|
||||
defer bufferPool.Put(buffer)
|
||||
entry.Buffer = buffer
|
||||
|
||||
entry.write()
|
||||
|
||||
entry.Buffer = nil
|
||||
|
||||
// To avoid Entry#log() returning a value that only would make sense for
|
||||
// panic() to use in Entry#Panic(), we avoid the allocation by checking
|
||||
// directly here.
|
||||
if level <= PanicLevel {
|
||||
panic(&entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) fireHooks() {
|
||||
entry.Logger.mu.Lock()
|
||||
defer entry.Logger.mu.Unlock()
|
||||
err := entry.Logger.Hooks.Fire(entry.Level, entry)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) write() {
|
||||
serialized, err := entry.Logger.Formatter.Format(entry)
|
||||
entry.Logger.mu.Lock()
|
||||
defer entry.Logger.mu.Unlock()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
|
||||
} else {
|
||||
_, err = entry.Logger.Out.Write(serialized)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Print(args ...interface{}) {
|
||||
entry.Info(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Info(args ...interface{}) {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warn(args ...interface{}) {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warning(args ...interface{}) {
|
||||
entry.Warn(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Error(args ...interface{}) {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatal(args ...interface{}) {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panic(args ...interface{}) {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||
}
|
||||
panic(fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Entry Printf family functions
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.Debug(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||
entry.Infof(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.Warn(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||
entry.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.Error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.Fatal(fmt.Sprintf(format, args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.Panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Entry Println family functions
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.Debug(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.Info(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Println(args ...interface{}) {
|
||||
entry.Infoln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnln(args ...interface{}) {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.Warn(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningln(args ...interface{}) {
|
||||
entry.Warnln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorln(args ...interface{}) {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.Error(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.Fatal(entry.sprintlnn(args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.Panic(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Sprintlnn => Sprint no newline. This is to get the behavior of
// fmt.Sprintln, where spaces are always added between operands, regardless of
|
||||
// their type. Instead of vendoring the Sprintln implementation to spare a
|
||||
// string allocation, we do the simplest thing.
|
||||
func (entry *Entry) sprintlnn(args ...interface{}) string {
|
||||
msg := fmt.Sprintln(args...)
|
||||
return msg[:len(msg)-1]
|
||||
}
|
201
vendor/github.com/sirupsen/logrus/exported.go
generated
vendored
@ -1,201 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// std is the name of the standard logger in stdlib `log`
|
||||
std = New()
|
||||
)
|
||||
|
||||
func StandardLogger() *Logger {
|
||||
return std
|
||||
}
|
||||
|
||||
// SetOutput sets the standard logger output.
|
||||
func SetOutput(out io.Writer) {
|
||||
std.SetOutput(out)
|
||||
}
|
||||
|
||||
// SetFormatter sets the standard logger formatter.
|
||||
func SetFormatter(formatter Formatter) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Formatter = formatter
|
||||
}
|
||||
|
||||
// SetLevel sets the standard logger level.
|
||||
func SetLevel(level Level) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.SetLevel(level)
|
||||
}
|
||||
|
||||
// GetLevel returns the standard logger level.
|
||||
func GetLevel() Level {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
return std.level()
|
||||
}
|
||||
|
||||
// AddHook adds a hook to the standard logger hooks.
|
||||
func AddHook(hook Hook) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Hooks.Add(hook)
|
||||
}
|
||||
|
||||
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
|
||||
func WithError(err error) *Entry {
|
||||
return std.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// WithField creates an entry from the standard logger and adds a field to
|
||||
// it. If you want multiple fields, use `WithFields`.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithField(key string, value interface{}) *Entry {
|
||||
return std.WithField(key, value)
|
||||
}
|
||||
|
||||
// WithFields creates an entry from the standard logger and adds multiple
|
||||
// fields to it. This is simply a helper for `WithField`, invoking it
|
||||
// once for each field.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithFields(fields Fields) *Entry {
|
||||
return std.WithFields(fields)
|
||||
}
|
||||
|
||||
// WithTime creates an entry from the standard logger and overrides the time of
|
||||
// logs generated with it.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithTime(t time.Time) *Entry {
|
||||
return std.WithTime(t)
|
||||
}
|
||||
|
||||
// Debug logs a message at level Debug on the standard logger.
|
||||
func Debug(args ...interface{}) {
|
||||
std.Debug(args...)
|
||||
}
|
||||
|
||||
// Print logs a message at level Info on the standard logger.
|
||||
func Print(args ...interface{}) {
|
||||
std.Print(args...)
|
||||
}
|
||||
|
||||
// Info logs a message at level Info on the standard logger.
|
||||
func Info(args ...interface{}) {
|
||||
std.Info(args...)
|
||||
}
|
||||
|
||||
// Warn logs a message at level Warn on the standard logger.
|
||||
func Warn(args ...interface{}) {
|
||||
std.Warn(args...)
|
||||
}
|
||||
|
||||
// Warning logs a message at level Warn on the standard logger.
|
||||
func Warning(args ...interface{}) {
|
||||
std.Warning(args...)
|
||||
}
|
||||
|
||||
// Error logs a message at level Error on the standard logger.
|
||||
func Error(args ...interface{}) {
|
||||
std.Error(args...)
|
||||
}
|
||||
|
||||
// Panic logs a message at level Panic on the standard logger.
|
||||
func Panic(args ...interface{}) {
|
||||
std.Panic(args...)
|
||||
}
|
||||
|
||||
// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
|
||||
func Fatal(args ...interface{}) {
|
||||
std.Fatal(args...)
|
||||
}
|
||||
|
||||
// Debugf logs a message at level Debug on the standard logger.
|
||||
func Debugf(format string, args ...interface{}) {
|
||||
std.Debugf(format, args...)
|
||||
}
|
||||
|
||||
// Printf logs a message at level Info on the standard logger.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
std.Printf(format, args...)
|
||||
}
|
||||
|
||||
// Infof logs a message at level Info on the standard logger.
|
||||
func Infof(format string, args ...interface{}) {
|
||||
std.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Warnf logs a message at level Warn on the standard logger.
|
||||
func Warnf(format string, args ...interface{}) {
|
||||
std.Warnf(format, args...)
|
||||
}
|
||||
|
||||
// Warningf logs a message at level Warn on the standard logger.
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
std.Warningf(format, args...)
|
||||
}
|
||||
|
||||
// Errorf logs a message at level Error on the standard logger.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
std.Errorf(format, args...)
|
||||
}
|
||||
|
||||
// Panicf logs a message at level Panic on the standard logger.
|
||||
func Panicf(format string, args ...interface{}) {
|
||||
std.Panicf(format, args...)
|
||||
}
|
||||
|
||||
// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
std.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
// Debugln logs a message at level Debug on the standard logger.
|
||||
func Debugln(args ...interface{}) {
|
||||
std.Debugln(args...)
|
||||
}
|
||||
|
||||
// Println logs a message at level Info on the standard logger.
|
||||
func Println(args ...interface{}) {
|
||||
std.Println(args...)
|
||||
}
|
||||
|
||||
// Infoln logs a message at level Info on the standard logger.
|
||||
func Infoln(args ...interface{}) {
|
||||
std.Infoln(args...)
|
||||
}
|
||||
|
||||
// Warnln logs a message at level Warn on the standard logger.
|
||||
func Warnln(args ...interface{}) {
|
||||
std.Warnln(args...)
|
||||
}
|
||||
|
||||
// Warningln logs a message at level Warn on the standard logger.
|
||||
func Warningln(args ...interface{}) {
|
||||
std.Warningln(args...)
|
||||
}
|
||||
|
||||
// Errorln logs a message at level Error on the standard logger.
|
||||
func Errorln(args ...interface{}) {
|
||||
std.Errorln(args...)
|
||||
}
|
||||
|
||||
// Panicln logs a message at level Panic on the standard logger.
|
||||
func Panicln(args ...interface{}) {
|
||||
std.Panicln(args...)
|
||||
}
|
||||
|
||||
// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
|
||||
func Fatalln(args ...interface{}) {
|
||||
std.Fatalln(args...)
|
||||
}
|
51
vendor/github.com/sirupsen/logrus/formatter.go
generated
vendored
@ -1,51 +0,0 @@
|
||||
package logrus
|
||||
|
||||
import "time"
|
||||
|
||||
const defaultTimestampFormat = time.RFC3339
|
||||
|
||||
// The Formatter interface is used to implement a custom Formatter. It takes an
|
||||
// `Entry`. It exposes all the fields, including the default ones:
|
||||
//
|
||||
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
|
||||
// * `entry.Data["time"]`. The timestamp.
|
||||
// * `entry.Data["level"]`. The level the entry was logged at.
|
||||
//
|
||||
// Any additional fields added with `WithField` or `WithFields` are also in
|
||||
// `entry.Data`. Format is expected to return an array of bytes which are then
|
||||
// logged to `logger.Out`.
|
||||
type Formatter interface {
|
||||
Format(*Entry) ([]byte, error)
|
||||
}
|
||||
|
||||
// This is to not silently overwrite `time`, `msg` and `level` fields when
|
||||
// dumping it. If this code wasn't there doing:
|
||||
//
|
||||
// logrus.WithField("level", 1).Info("hello")
|
||||
//
|
||||
// Would just silently drop the user provided level. Instead with this code
|
||||
// it'll be logged as:
|
||||
//
|
||||
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
|
||||
//
|
||||
// It's not exported because it's still using Data in an opinionated way. It's to
|
||||
// avoid code duplication between the two default formatters.
|
||||
func prefixFieldClashes(data Fields, fieldMap FieldMap) {
|
||||
timeKey := fieldMap.resolve(FieldKeyTime)
|
||||
if t, ok := data[timeKey]; ok {
|
||||
data["fields."+timeKey] = t
|
||||
delete(data, timeKey)
|
||||
}
|
||||
|
||||
msgKey := fieldMap.resolve(FieldKeyMsg)
|
||||
if m, ok := data[msgKey]; ok {
|
||||
data["fields."+msgKey] = m
|
||||
delete(data, msgKey)
|
||||
}
|
||||
|
||||
levelKey := fieldMap.resolve(FieldKeyLevel)
|
||||
if l, ok := data[levelKey]; ok {
|
||||
data["fields."+levelKey] = l
|
||||
delete(data, levelKey)
|
||||
}
|
||||
}
|
34	vendor/github.com/sirupsen/logrus/hooks.go	generated	vendored
@@ -1,34 +0,0 @@
package logrus

// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
	Levels() []Level
	Fire(*Entry) error
}

// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook

// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
	for _, level := range hook.Levels() {
		hooks[level] = append(hooks[level], hook)
	}
}

// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
	for _, hook := range hooks[level] {
		if err := hook.Fire(entry); err != nil {
			return err
		}
	}

	return nil
}
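For context only, and not part of this commit: a minimal sketch of how the removed Hook interface is typically used with logrus, assuming the standard logrus.New, AddHook and Entry API shown above. The errorNotifier type and its behaviour are hypothetical.

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// errorNotifier is a hypothetical Hook that reacts to error-and-above entries.
type errorNotifier struct{}

// Levels restricts the hook to error, fatal and panic entries.
func (errorNotifier) Levels() []logrus.Level {
	return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

// Fire is called by the logger for every matching entry.
func (errorNotifier) Fire(entry *logrus.Entry) error {
	fmt.Printf("notify: %s\n", entry.Message)
	return nil
}

func main() {
	log := logrus.New()
	log.AddHook(errorNotifier{}) // registered via LevelHooks.Add under the hood
	log.Error("something went wrong")
}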
89	vendor/github.com/sirupsen/logrus/json_formatter.go	generated	vendored
@@ -1,89 +0,0 @@
Removed in full: the FieldMap type with its default keys (FieldKeyMsg = "msg", FieldKeyLevel = "level", FieldKeyTime = "time") and its resolve helper, plus the JSONFormatter struct (TimestampFormat, DisableTimestamp, DataKey, FieldMap) whose Format method copies entry fields, stringifies error values, applies prefixFieldClashes, adds the time/msg/level keys, and marshals the result with encoding/json.
337	vendor/github.com/sirupsen/logrus/logger.go	generated	vendored
@@ -1,337 +0,0 @@
Removed in full: the Logger struct (Out, Hooks, Formatter, Level, a MutexWrap and a reusable entry sync.Pool), the MutexWrap Lock/Unlock/Disable helpers, New(), the newEntry/releaseEntry pool helpers, the WithField/WithFields/WithError/WithTime wrappers, the level-checked Debugf through Panicln logging methods (Fatal variants call Exit(1)), SetNoLock, the atomic level()/SetLevel accessors, SetOutput and AddHook.
143	vendor/github.com/sirupsen/logrus/logrus.go	generated	vendored
@@ -1,143 +0,0 @@
Removed in full: the Fields and Level types, Level.String(), ParseLevel, the AllLevels slice, the PanicLevel through DebugLevel constants, the compile-time StdLogger assertions, and the StdLogger and FieldLogger interfaces.
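For context only, and not part of this commit: a small sketch of how the removed level plumbing is commonly used, assuming the standard logrus.ParseLevel and (*Logger).SetLevel API summarised above; the "debug" string is only an illustrative input.

package main

import "github.com/sirupsen/logrus"

func main() {
	log := logrus.New()

	// ParseLevel maps a string such as "debug" or "warn" onto a Level constant.
	lvl, err := logrus.ParseLevel("debug")
	if err != nil {
		log.Fatalf("invalid level: %v", err)
	}

	// SetLevel stores the level atomically; entries below it are skipped.
	log.SetLevel(lvl)
	log.Debug("now visible at debug level")
}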
10	vendor/github.com/sirupsen/logrus/terminal_bsd.go	generated	vendored
@@ -1,10 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine,!gopherjs

package logrus

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TIOCGETA

type Termios unix.Termios
11	vendor/github.com/sirupsen/logrus/terminal_check_appengine.go	generated	vendored
@@ -1,11 +0,0 @@
// +build appengine gopherjs

package logrus

import (
	"io"
)

func checkIfTerminal(w io.Writer) bool {
	return true
}
19	vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go	generated	vendored
@@ -1,19 +0,0 @@
// +build !appengine,!gopherjs

package logrus

import (
	"io"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func checkIfTerminal(w io.Writer) bool {
	switch v := w.(type) {
	case *os.File:
		return terminal.IsTerminal(int(v.Fd()))
	default:
		return false
	}
}
14	vendor/github.com/sirupsen/logrus/terminal_linux.go	generated	vendored
@@ -1,14 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine,!gopherjs

package logrus

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TCGETS

type Termios unix.Termios
195	vendor/github.com/sirupsen/logrus/text_formatter.go	generated	vendored
@@ -1,195 +0,0 @@
Removed in full: the ANSI colour constants, the TextFormatter struct (ForceColors, DisableColors, DisableTimestamp, FullTimestamp, TimestampFormat, DisableSorting, DisableLevelTruncation, QuoteEmptyFields, FieldMap and a cached isTerminal flag), and its Format, printColored, needsQuoting, appendKeyValue and appendValue methods.
62	vendor/github.com/sirupsen/logrus/writer.go	generated	vendored
@@ -1,62 +0,0 @@
Removed in full: the Writer and WriterLevel helpers on Logger and Entry, which return an io.PipeWriter whose input is scanned line by line and re-logged at the chosen level, plus the finalizer that closes the pipe writer.
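For context only, and not part of this commit: a brief sketch of the usual way the removed WriterLevel helper is used to route a standard-library logger through logrus, assuming the API summarised above.

package main

import (
	"log"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// WriterLevel returns an *io.PipeWriter; every line written to it is
	// re-emitted as a logrus entry at the given level.
	w := logger.WriterLevel(logrus.WarnLevel)
	defer w.Close()

	// Point the stdlib logger at the pipe so its output shows up in logrus.
	stdlog := log.New(w, "", 0)
	stdlog.Println("legacy warning routed through logrus")
}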
202	vendor/github.com/spf13/cobra/cobra/cmd/testdata/LICENSE.golden	generated	vendored
@@ -1,202 +0,0 @@
Removed in full: the verbatim Apache License, Version 2.0 text used as a test golden fixture for the cobra command generator.
0	vendor/golang.org/x/sys/unix/mkall.sh	generated	vendored	Executable file → Normal file
0	vendor/golang.org/x/sys/unix/mkerrors.sh	generated	vendored	Executable file → Normal file
0	vendor/golang.org/x/sys/unix/mksyscall.pl	generated	vendored	Executable file → Normal file
0	vendor/golang.org/x/sys/unix/mksyscall_solaris.pl	generated	vendored	Executable file → Normal file
0	vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl	generated	vendored	Executable file → Normal file
0	vendor/golang.org/x/sys/unix/mksysnum_darwin.pl	generated	vendored	Executable file → Normal file
0	vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl	generated	vendored	Executable file → Normal file
0	vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl	generated	vendored	Executable file → Normal file
0	vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl	generated	vendored	Executable file → Normal file
0	vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl	generated	vendored	Executable file → Normal file
702	vendor/golang.org/x/text/collate/build/builder.go	generated	vendored
@@ -1,702 +0,0 @@
Removed in full: the golang.org/x/text collation table builder, including the Builder and Tailoring types, NewBuilder, Add, SetAnchor/SetAnchorBefore, Insert, the ordering helpers (getWeight, addExtension, verifyWeights, patchNorm, buildOrdering), the build/Build entry points, and the Print routine that emits the generated tables as Go source.
|
||||
err = e
|
||||
}
|
||||
}
|
||||
t, err := b.build()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
p(fmt.Fprintf(w, `var availableLocales = "und`))
|
||||
for _, loc := range b.locale {
|
||||
if loc.id != "und" {
|
||||
p(fmt.Fprintf(w, ",%s", loc.id))
|
||||
}
|
||||
}
|
||||
p(fmt.Fprint(w, "\"\n\n"))
|
||||
p(fmt.Fprintf(w, "const varTop = 0x%x\n\n", b.varTop))
|
||||
p(fmt.Fprintln(w, "var locales = [...]tableIndex{"))
|
||||
for _, loc := range b.locale {
|
||||
if loc.id == "und" {
|
||||
p(t.fprintIndex(w, loc.index.handle, loc.id))
|
||||
}
|
||||
}
|
||||
for _, loc := range b.locale {
|
||||
if loc.id != "und" {
|
||||
p(t.fprintIndex(w, loc.index.handle, loc.id))
|
||||
}
|
||||
}
|
||||
p(fmt.Fprint(w, "}\n\n"))
|
||||
n, _, err = t.fprint(w, "main")
|
||||
return
|
||||
}
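// A minimal usage sketch (the *Builder value b is assumed to have been
// created and populated elsewhere; that code is not part of this file):
//
//	var buf bytes.Buffer
//	if _, err := b.Print(&buf); err != nil {
//		log.Fatal(err)
//	}
//	// buf now holds Go source with the availableLocales, varTop, locales,
//	// and main* table definitions emitted above.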
|
||||
|
||||
// reproducibleFromNFKD checks whether the given expansion could be generated
|
||||
// from an NFKD expansion.
|
||||
func reproducibleFromNFKD(e *entry, exp, nfkd []rawCE) bool {
|
||||
// Length must be equal.
|
||||
if len(exp) != len(nfkd) {
|
||||
return false
|
||||
}
|
||||
for i, ce := range exp {
|
||||
// Primary and secondary values should be equal.
|
||||
if ce.w[0] != nfkd[i].w[0] || ce.w[1] != nfkd[i].w[1] {
|
||||
return false
|
||||
}
|
||||
// Tertiary values should be equal to maxTertiary from the third element onwards.
|
||||
// TODO: there seem to be a lot of cases in CLDR (e.g. ㏭ in zh.xml) that can
|
||||
// simply be dropped. Try this out by dropping the following code.
|
||||
if i >= 2 && ce.w[2] != maxTertiary {
|
||||
return false
|
||||
}
|
||||
if _, err := makeCE(ce); err != nil {
|
||||
// Simply return false. The error will be caught elsewhere.
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func simplify(o *ordering) {
|
||||
// Runes that are a starter of a contraction should not be removed.
|
||||
// (To date, the only such rune is the Kannada character U+0CCA.)
|
||||
keep := make(map[rune]bool)
|
||||
for e := o.front(); e != nil; e, _ = e.nextIndexed() {
|
||||
if len(e.runes) > 1 {
|
||||
keep[e.runes[0]] = true
|
||||
}
|
||||
}
|
||||
// Tag entries for which the runes NFKD decompose to identical values.
|
||||
for e := o.front(); e != nil; e, _ = e.nextIndexed() {
|
||||
s := e.str
|
||||
nfkd := norm.NFKD.String(s)
|
||||
nfd := norm.NFD.String(s)
|
||||
if e.decompose || len(e.runes) > 1 || len(e.elems) == 1 || keep[e.runes[0]] || nfkd == nfd {
|
||||
continue
|
||||
}
|
||||
if reproducibleFromNFKD(e, e.elems, o.genColElems(nfkd)) {
|
||||
e.decompose = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// appendExpansion converts the given collation sequence to
|
||||
// collation elements and adds them to the expansion table.
|
||||
// It returns an index to the expansion table.
|
||||
func (b *Builder) appendExpansion(e *entry) int {
|
||||
t := b.t
|
||||
i := len(t.ExpandElem)
|
||||
ce := uint32(len(e.elems))
|
||||
t.ExpandElem = append(t.ExpandElem, ce)
|
||||
for _, w := range e.elems {
|
||||
ce, err := makeCE(w)
|
||||
if err != nil {
|
||||
b.error(err)
|
||||
return -1
|
||||
}
|
||||
t.ExpandElem = append(t.ExpandElem, ce)
|
||||
}
|
||||
return i
|
||||
}
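// The layout appended above is a length header followed by the encoded
// elements. For an entry that expands to two collation elements and is stored
// at offset i (values are illustrative only):
//
//	t.ExpandElem[i]   == 2        // number of elements in this expansion
//	t.ExpandElem[i+1] == <first encoded element>
//	t.ExpandElem[i+2] == <second encoded element>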
|
||||
|
||||
// processExpansions extracts data necessary to generate
|
||||
// the expansion tables.
|
||||
func (b *Builder) processExpansions(o *ordering) {
|
||||
for e := o.front(); e != nil; e, _ = e.nextIndexed() {
|
||||
if !e.expansion() {
|
||||
continue
|
||||
}
|
||||
key := fmt.Sprintf("%v", e.elems)
|
||||
i, ok := b.expIndex[key]
|
||||
if !ok {
|
||||
i = b.appendExpansion(e)
|
||||
b.expIndex[key] = i
|
||||
}
|
||||
e.expansionIndex = i
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Builder) processContractions(o *ordering) {
|
||||
// Collate contractions per starter rune.
|
||||
starters := []rune{}
|
||||
cm := make(map[rune][]*entry)
|
||||
for e := o.front(); e != nil; e, _ = e.nextIndexed() {
|
||||
if e.contraction() {
|
||||
if len(e.str) > b.t.MaxContractLen {
|
||||
b.t.MaxContractLen = len(e.str)
|
||||
}
|
||||
r := e.runes[0]
|
||||
if _, ok := cm[r]; !ok {
|
||||
starters = append(starters, r)
|
||||
}
|
||||
cm[r] = append(cm[r], e)
|
||||
}
|
||||
}
|
||||
// Add entries of single runes that are at the start of a contraction.
|
||||
for e := o.front(); e != nil; e, _ = e.nextIndexed() {
|
||||
if !e.contraction() {
|
||||
r := e.runes[0]
|
||||
if _, ok := cm[r]; ok {
|
||||
cm[r] = append(cm[r], e)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Build the tries for the contractions.
|
||||
t := b.t
|
||||
for _, r := range starters {
|
||||
l := cm[r]
|
||||
// Compute suffix strings. There are 31 different contraction suffix
|
||||
// sets for 715 contractions and 82 contraction starter runes as of
|
||||
// version 6.0.0.
|
||||
sufx := []string{}
|
||||
hasSingle := false
|
||||
for _, e := range l {
|
||||
if len(e.runes) > 1 {
|
||||
sufx = append(sufx, string(e.runes[1:]))
|
||||
} else {
|
||||
hasSingle = true
|
||||
}
|
||||
}
|
||||
if !hasSingle {
|
||||
b.error(fmt.Errorf("no single entry for starter rune %U found", r))
|
||||
continue
|
||||
}
|
||||
// Unique the suffix set.
|
||||
sort.Strings(sufx)
|
||||
key := strings.Join(sufx, "\n")
|
||||
handle, ok := b.ctHandle[key]
|
||||
if !ok {
|
||||
var err error
|
||||
handle, err = appendTrie(&t.ContractTries, sufx)
|
||||
if err != nil {
|
||||
b.error(err)
|
||||
}
|
||||
b.ctHandle[key] = handle
|
||||
}
|
||||
// Bucket sort entries in index order.
|
||||
es := make([]*entry, len(l))
|
||||
for _, e := range l {
|
||||
var p, sn int
|
||||
if len(e.runes) > 1 {
|
||||
str := []byte(string(e.runes[1:]))
|
||||
p, sn = lookup(&t.ContractTries, handle, str)
|
||||
if sn != len(str) {
|
||||
log.Fatalf("%s: processContractions: unexpected length for '%X'; len=%d; want %d", o.id, e.runes, sn, len(str))
|
||||
}
|
||||
}
|
||||
if es[p] != nil {
|
||||
log.Fatalf("%s: multiple contractions for position %d for rune %U", o.id, p, e.runes[0])
|
||||
}
|
||||
es[p] = e
|
||||
}
|
||||
// Create collation elements for contractions.
|
||||
elems := []uint32{}
|
||||
for _, e := range es {
|
||||
ce, err := e.encodeBase()
|
||||
b.errorID(o.id, err)
|
||||
elems = append(elems, ce)
|
||||
}
|
||||
key = fmt.Sprintf("%v", elems)
|
||||
i, ok := b.ctElem[key]
|
||||
if !ok {
|
||||
i = len(t.ContractElem)
|
||||
b.ctElem[key] = i
|
||||
t.ContractElem = append(t.ContractElem, elems...)
|
||||
}
|
||||
// Store info in entry for starter rune.
|
||||
es[0].contractionIndex = i
|
||||
es[0].contractionHandle = handle
|
||||
}
|
||||
}
|
294
vendor/golang.org/x/text/collate/build/colelem.go
generated
vendored
@ -1,294 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/text/internal/colltab"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultSecondary = 0x20
|
||||
defaultTertiary = 0x2
|
||||
maxTertiary = 0x1F
|
||||
)
|
||||
|
||||
type rawCE struct {
|
||||
w []int
|
||||
ccc uint8
|
||||
}
|
||||
|
||||
func makeRawCE(w []int, ccc uint8) rawCE {
|
||||
ce := rawCE{w: make([]int, 4), ccc: ccc}
|
||||
copy(ce.w, w)
|
||||
return ce
|
||||
}
|
||||
|
||||
// A collation element is represented as an uint32.
|
||||
// In the typical case, a rune maps to a single collation element. If a rune
|
||||
// can be the start of a contraction or expands into multiple collation elements,
|
||||
// then the collation element that is associated with a rune will have a special
|
||||
// form to represent such m to n mappings. Such special collation elements
|
||||
// have a value >= 0x80000000.
|
||||
|
||||
const (
|
||||
maxPrimaryBits = 21
|
||||
maxSecondaryBits = 12
|
||||
maxTertiaryBits = 8
|
||||
)
|
||||
|
||||
func makeCE(ce rawCE) (uint32, error) {
|
||||
v, e := colltab.MakeElem(ce.w[0], ce.w[1], ce.w[2], ce.ccc)
|
||||
return uint32(v), e
|
||||
}
|
||||
|
||||
// For contractions, collation elements are of the form
|
||||
// 110bbbbb bbbbbbbb iiiiiiii iiiinnnn, where
|
||||
// - n* is the size of the first node in the contraction trie.
|
||||
// - i* is the index of the first node in the contraction trie.
|
||||
// - b* is the offset into the contraction collation element table.
|
||||
// See contract.go for details on the contraction trie.
|
||||
const (
|
||||
contractID = 0xC0000000
|
||||
maxNBits = 4
|
||||
maxTrieIndexBits = 12
|
||||
maxContractOffsetBits = 13
|
||||
)
|
||||
|
||||
func makeContractIndex(h ctHandle, offset int) (uint32, error) {
|
||||
if h.n >= 1<<maxNBits {
|
||||
return 0, fmt.Errorf("size of contraction trie node too large: %d >= %d", h.n, 1<<maxNBits)
|
||||
}
|
||||
if h.index >= 1<<maxTrieIndexBits {
|
||||
return 0, fmt.Errorf("size of contraction trie offset too large: %d >= %d", h.index, 1<<maxTrieIndexBits)
|
||||
}
|
||||
if offset >= 1<<maxContractOffsetBits {
|
||||
return 0, fmt.Errorf("contraction offset out of bounds: %x >= %x", offset, 1<<maxContractOffsetBits)
|
||||
}
|
||||
ce := uint32(contractID)
|
||||
ce += uint32(offset << (maxNBits + maxTrieIndexBits))
|
||||
ce += uint32(h.index << maxNBits)
|
||||
ce += uint32(h.n)
|
||||
return ce, nil
|
||||
}
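// For illustration, with arbitrarily chosen values: a contraction trie handle
// with index 5 and first-node size 3, stored at contraction offset 2, packs as
//
//	ce, _ := makeContractIndex(ctHandle{index: 5, n: 3}, 2)
//	// ce == contractID | 2<<16 | 5<<4 | 3 == 0xC0020053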
|
||||
|
||||
// For expansions, collation elements are of the form
|
||||
// 11100000 00000000 bbbbbbbb bbbbbbbb,
|
||||
// where b* is the index into the expansion sequence table.
|
||||
const (
|
||||
expandID = 0xE0000000
|
||||
maxExpandIndexBits = 16
|
||||
)
|
||||
|
||||
func makeExpandIndex(index int) (uint32, error) {
|
||||
if index >= 1<<maxExpandIndexBits {
|
||||
return 0, fmt.Errorf("expansion index out of bounds: %x >= %x", index, 1<<maxExpandIndexBits)
|
||||
}
|
||||
return expandID + uint32(index), nil
|
||||
}
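// For example, an expansion stored at offset 0x1234 of the expansion table
// (an illustrative value) is encoded as expandID+0x1234 == 0xE0001234.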
|
||||
|
||||
// Each list of collation elements corresponding to an expansion starts with
|
||||
// a header indicating the length of the sequence.
|
||||
func makeExpansionHeader(n int) (uint32, error) {
|
||||
return uint32(n), nil
|
||||
}
|
||||
|
||||
// Some runes can be expanded using NFKD decomposition. Instead of storing the full
|
||||
// sequence of collation elements, we decompose the rune and lookup the collation
|
||||
// elements for each rune in the decomposition and modify the tertiary weights.
|
||||
// The collation element, in this case, is of the form
|
||||
// 11110000 00000000 wwwwwwww vvvvvvvv, where
|
||||
// - v* is the replacement tertiary weight for the first rune,
|
||||
// - w* is the replacement tertiary weight for the second rune,
|
||||
// Tertiary weights of subsequent runes should be replaced with maxTertiary.
|
||||
// See http://www.unicode.org/reports/tr10/#Compatibility_Decompositions for more details.
|
||||
const (
|
||||
decompID = 0xF0000000
|
||||
)
|
||||
|
||||
func makeDecompose(t1, t2 int) (uint32, error) {
|
||||
if t1 >= 256 || t1 < 0 {
|
||||
return 0, fmt.Errorf("first tertiary weight out of bounds: %d >= 256", t1)
|
||||
}
|
||||
if t2 >= 256 || t2 < 0 {
|
||||
return 0, fmt.Errorf("second tertiary weight out of bounds: %d >= 256", t2)
|
||||
}
|
||||
return uint32(t2<<8+t1) + decompID, nil
|
||||
}
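// For instance, with illustrative tertiary weights t1=0x08 (first rune) and
// t2=0x02 (second rune):
//
//	ce, _ := makeDecompose(0x08, 0x02)
//	// ce == decompID | 0x02<<8 | 0x08 == 0xF0000208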
|
||||
|
||||
const (
|
||||
// These constants were taken from http://www.unicode.org/versions/Unicode6.0.0/ch12.pdf.
|
||||
minUnified rune = 0x4E00
|
||||
maxUnified = 0x9FFF
|
||||
minCompatibility = 0xF900
|
||||
maxCompatibility = 0xFAFF
|
||||
minRare = 0x3400
|
||||
maxRare = 0x4DBF
|
||||
)
|
||||
const (
|
||||
commonUnifiedOffset = 0x10000
|
||||
rareUnifiedOffset = 0x20000 // largest rune in common is U+FAFF
|
||||
otherOffset = 0x50000 // largest rune in rare is U+2FA1D
|
||||
illegalOffset = otherOffset + int(unicode.MaxRune)
|
||||
maxPrimary = illegalOffset + 1
|
||||
)
|
||||
|
||||
// implicitPrimary returns the primary weight for a rune
|
||||
// for which there is no entry for the rune in the collation table.
|
||||
// We take a different approach from the one specified in
|
||||
// http://unicode.org/reports/tr10/#Implicit_Weights,
|
||||
// but preserve the resulting relative ordering of the runes.
|
||||
func implicitPrimary(r rune) int {
|
||||
if unicode.Is(unicode.Ideographic, r) {
|
||||
if r >= minUnified && r <= maxUnified {
|
||||
// The most common case for CJK.
|
||||
return int(r) + commonUnifiedOffset
|
||||
}
|
||||
if r >= minCompatibility && r <= maxCompatibility {
|
||||
// This will typically not hit. The DUCET explicitly specifies mappings
|
||||
// for all characters that do not decompose.
|
||||
return int(r) + commonUnifiedOffset
|
||||
}
|
||||
return int(r) + rareUnifiedOffset
|
||||
}
|
||||
return int(r) + otherOffset
|
||||
}
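// Rough examples of this mapping: U+4E2D, a common unified ideograph, yields
// 0x4E2D+commonUnifiedOffset == 0x14E2D, while a non-ideographic rune such as
// U+1F600 yields 0x1F600+otherOffset == 0x6F600.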
|
||||
|
||||
// convertLargeWeights converts collation elements with large
|
||||
// primaries (either double primaries or for illegal runes)
|
||||
// to our own representation.
|
||||
// A CJK character C is represented in the DUCET as
|
||||
// [.FBxx.0020.0002.C][.BBBB.0000.0000.C]
|
||||
// We will rewrite these characters to a single CE.
|
||||
// We assume the CJK values start at 0x8000.
|
||||
// See http://unicode.org/reports/tr10/#Implicit_Weights
|
||||
func convertLargeWeights(elems []rawCE) (res []rawCE, err error) {
|
||||
const (
|
||||
cjkPrimaryStart = 0xFB40
|
||||
rarePrimaryStart = 0xFB80
|
||||
otherPrimaryStart = 0xFBC0
|
||||
illegalPrimary = 0xFFFE
|
||||
highBitsMask = 0x3F
|
||||
lowBitsMask = 0x7FFF
|
||||
lowBitsFlag = 0x8000
|
||||
shiftBits = 15
|
||||
)
|
||||
for i := 0; i < len(elems); i++ {
|
||||
ce := elems[i].w
|
||||
p := ce[0]
|
||||
if p < cjkPrimaryStart {
|
||||
continue
|
||||
}
|
||||
if p > 0xFFFF {
|
||||
return elems, fmt.Errorf("found primary weight %X; should be <= 0xFFFF", p)
|
||||
}
|
||||
if p >= illegalPrimary {
|
||||
ce[0] = illegalOffset + p - illegalPrimary
|
||||
} else {
|
||||
if i+1 >= len(elems) {
|
||||
return elems, fmt.Errorf("second part of double primary weight missing: %v", elems)
|
||||
}
|
||||
if elems[i+1].w[0]&lowBitsFlag == 0 {
|
||||
return elems, fmt.Errorf("malformed second part of double primary weight: %v", elems)
|
||||
}
|
||||
np := ((p & highBitsMask) << shiftBits) + elems[i+1].w[0]&lowBitsMask
|
||||
switch {
|
||||
case p < rarePrimaryStart:
|
||||
np += commonUnifiedOffset
|
||||
case p < otherPrimaryStart:
|
||||
np += rareUnifiedOffset
|
||||
default:
|
||||
p += otherOffset
|
||||
}
|
||||
ce[0] = np
|
||||
for j := i + 1; j+1 < len(elems); j++ {
|
||||
elems[j] = elems[j+1]
|
||||
}
|
||||
elems = elems[:len(elems)-1]
|
||||
}
|
||||
}
|
||||
return elems, nil
|
||||
}
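// Sketch of the conversion for a typical CJK ideograph, assuming the DUCET
// double-primary form described above: U+4E2D appears as the pair
// [.FB40.0020.0002][.CE2D.0000.0000], which collapses to the single primary
//
//	((0xFB40&highBitsMask)<<shiftBits) + (0xCE2D&lowBitsMask) + commonUnifiedOffset
//	// == 0x0000 + 0x4E2D + 0x10000 == 0x14E2D
//
// matching implicitPrimary(0x4E2D).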
|
||||
|
||||
// nextWeight computes the first possible collation weights following elems
|
||||
// for the given level.
|
||||
func nextWeight(level colltab.Level, elems []rawCE) []rawCE {
|
||||
if level == colltab.Identity {
|
||||
next := make([]rawCE, len(elems))
|
||||
copy(next, elems)
|
||||
return next
|
||||
}
|
||||
next := []rawCE{makeRawCE(elems[0].w, elems[0].ccc)}
|
||||
next[0].w[level]++
|
||||
if level < colltab.Secondary {
|
||||
next[0].w[colltab.Secondary] = defaultSecondary
|
||||
}
|
||||
if level < colltab.Tertiary {
|
||||
next[0].w[colltab.Tertiary] = defaultTertiary
|
||||
}
|
||||
// Filter entries that cannot influence ordering.
|
||||
for _, ce := range elems[1:] {
|
||||
skip := true
|
||||
for i := colltab.Primary; i < level; i++ {
|
||||
skip = skip && ce.w[i] == 0
|
||||
}
|
||||
if !skip {
|
||||
next = append(next, ce)
|
||||
}
|
||||
}
|
||||
return next
|
||||
}
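// For example, with illustrative weights: given a single element with weights
// [0x5000, 0x25, 0x4], nextWeight at colltab.Secondary returns an element with
// weights [0x5000, 0x26, 0x2]; the secondary weight is incremented and the
// tertiary weight is reset to defaultTertiary.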
|
||||
|
||||
func nextVal(elems []rawCE, i int, level colltab.Level) (index, value int) {
|
||||
for ; i < len(elems) && elems[i].w[level] == 0; i++ {
|
||||
}
|
||||
if i < len(elems) {
|
||||
return i, elems[i].w[level]
|
||||
}
|
||||
return i, 0
|
||||
}
|
||||
|
||||
// compareWeights returns -1 if a < b, 1 if a > b, or 0 otherwise.
|
||||
// It also returns the collation level at which the difference is found.
|
||||
func compareWeights(a, b []rawCE) (result int, level colltab.Level) {
|
||||
for level := colltab.Primary; level < colltab.Identity; level++ {
|
||||
var va, vb int
|
||||
for ia, ib := 0, 0; ia < len(a) || ib < len(b); ia, ib = ia+1, ib+1 {
|
||||
ia, va = nextVal(a, ia, level)
|
||||
ib, vb = nextVal(b, ib, level)
|
||||
if va != vb {
|
||||
if va < vb {
|
||||
return -1, level
|
||||
} else {
|
||||
return 1, level
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0, colltab.Identity
|
||||
}
|
||||
|
||||
func equalCE(a, b rawCE) bool {
|
||||
for i := 0; i < 3; i++ {
|
||||
if b.w[i] != a.w[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func equalCEArrays(a, b []rawCE) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if !equalCE(a[i], b[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
309
vendor/golang.org/x/text/collate/build/contract.go
generated
vendored
@ -1,309 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/colltab"
|
||||
)
|
||||
|
||||
// This file contains code for detecting contractions and generating
|
||||
// the necessary tables.
|
||||
// Any Unicode Collation Algorithm (UCA) table entry that has more than
|
||||
// one rune on the left-hand side is called a contraction.
|
||||
// See http://www.unicode.org/reports/tr10/#Contractions for more details.
|
||||
//
|
||||
// We define the following terms:
|
||||
// initial: a rune that appears as the first rune in a contraction.
|
||||
// suffix: a sequence of runes succeeding the initial rune
|
||||
// in a given contraction.
|
||||
// non-initial: a rune that appears in a suffix.
|
||||
//
|
||||
// A rune may be both an initial and a non-initial and may be so in
|
||||
// many contractions. An initial may typically also appear by itself.
|
||||
// In case of ambiguities, the UCA requires we match the longest
|
||||
// contraction.
|
||||
//
|
||||
// Many contraction rules share the same set of possible suffixes.
|
||||
// We store sets of suffixes in a trie that associates an index with
|
||||
// each suffix in the set. This index can be used to look up a
|
||||
// collation element associated with the (starter rune, suffix) pair.
|
||||
//
|
||||
// The trie is defined on a UTF-8 byte sequence.
|
||||
// The overall trie is represented as an array of ctEntries. Each node of the trie
|
||||
// is represented as a subsequence of ctEntries, where each entry corresponds to
|
||||
// a possible match of a next character in the search string. An entry
|
||||
// also includes the length and offset to the next sequence of entries
|
||||
// to check in case of a match.
|
||||
|
||||
const (
|
||||
final = 0
|
||||
noIndex = 0xFF
|
||||
)
|
||||
|
||||
// ctEntry associates to a matching byte an offset and/or next sequence of
|
||||
// bytes to check. A ctEntry c is called final if a match means that the
|
||||
// longest suffix has been found. An entry c is final if c.N == 0.
|
||||
// A single final entry can match a range of characters to an offset.
|
||||
// A non-final entry always matches a single byte. Note that a non-final
|
||||
// entry might still resemble a completed suffix.
|
||||
// Examples:
|
||||
// The suffix strings "ab" and "ac" can be represented as:
|
||||
// []ctEntry{
|
||||
// {'a', 1, 1, noIndex}, // 'a' by itself does not match, so i is 0xFF.
|
||||
// {'b', 'c', 0, 1}, // "ab" -> 1, "ac" -> 2
|
||||
// }
|
||||
//
|
||||
// The suffix strings "ab", "abc", "abd", and "abcd" can be represented as:
|
||||
// []ctEntry{
|
||||
// {'a', 1, 1, noIndex}, // 'a' must be followed by 'b'.
|
||||
// {'b', 1, 2, 1}, // "ab" -> 1, may be followed by 'c' or 'd'.
|
||||
// {'d', 'd', final, 3}, // "abd" -> 3
|
||||
// {'c', 4, 1, 2}, // "abc" -> 2, may be followed by 'd'.
|
||||
// {'d', 'd', final, 4}, // "abcd" -> 4
|
||||
// }
|
||||
// See genStateTests in contract_test.go for more examples.
|
||||
type ctEntry struct {
|
||||
L uint8 // non-final: byte value to match; final: lowest match in range.
|
||||
H uint8 // non-final: relative index to next block; final: highest match in range.
|
||||
N uint8 // non-final: length of next block; final: final
|
||||
I uint8 // result offset. Will be noIndex if more bytes are needed to complete.
|
||||
}
|
||||
|
||||
// contractTrieSet holds a set of contraction tries. The tries are stored
|
||||
// consecutively in the entry field.
|
||||
type contractTrieSet []struct{ l, h, n, i uint8 }
|
||||
|
||||
// ctHandle is used to identify a trie in the trie set, consisting of an offset
|
||||
// in the array and the size of the first node.
|
||||
type ctHandle struct {
|
||||
index, n int
|
||||
}
|
||||
|
||||
// appendTrie adds a new trie for the given suffixes to the trie set and returns
|
||||
// a handle to it. The handle will be invalid on error.
|
||||
func appendTrie(ct *colltab.ContractTrieSet, suffixes []string) (ctHandle, error) {
|
||||
es := make([]stridx, len(suffixes))
|
||||
for i, s := range suffixes {
|
||||
es[i].str = s
|
||||
}
|
||||
sort.Sort(offsetSort(es))
|
||||
for i := range es {
|
||||
es[i].index = i + 1
|
||||
}
|
||||
sort.Sort(genidxSort(es))
|
||||
i := len(*ct)
|
||||
n, err := genStates(ct, es)
|
||||
if err != nil {
|
||||
*ct = (*ct)[:i]
|
||||
return ctHandle{}, err
|
||||
}
|
||||
return ctHandle{i, n}, nil
|
||||
}
|
||||
|
||||
// genStates generates ctEntries for a given suffix set and returns
|
||||
// the number of entries for the first node.
|
||||
func genStates(ct *colltab.ContractTrieSet, sis []stridx) (int, error) {
|
||||
if len(sis) == 0 {
|
||||
return 0, fmt.Errorf("genStates: list of suffices must be non-empty")
|
||||
}
|
||||
start := len(*ct)
|
||||
// create entries for differing first bytes.
|
||||
for _, si := range sis {
|
||||
s := si.str
|
||||
if len(s) == 0 {
|
||||
continue
|
||||
}
|
||||
added := false
|
||||
c := s[0]
|
||||
if len(s) > 1 {
|
||||
for j := len(*ct) - 1; j >= start; j-- {
|
||||
if (*ct)[j].L == c {
|
||||
added = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !added {
|
||||
*ct = append(*ct, ctEntry{L: c, I: noIndex})
|
||||
}
|
||||
} else {
|
||||
for j := len(*ct) - 1; j >= start; j-- {
|
||||
// Update the offset for longer suffixes with the same byte.
|
||||
if (*ct)[j].L == c {
|
||||
(*ct)[j].I = uint8(si.index)
|
||||
added = true
|
||||
}
|
||||
// Extend range of final ctEntry, if possible.
|
||||
if (*ct)[j].H+1 == c {
|
||||
(*ct)[j].H = c
|
||||
added = true
|
||||
}
|
||||
}
|
||||
if !added {
|
||||
*ct = append(*ct, ctEntry{L: c, H: c, N: final, I: uint8(si.index)})
|
||||
}
|
||||
}
|
||||
}
|
||||
n := len(*ct) - start
|
||||
// Append nodes for the remainder of the suffixes for each ctEntry.
|
||||
sp := 0
|
||||
for i, end := start, len(*ct); i < end; i++ {
|
||||
fe := (*ct)[i]
|
||||
if fe.H == 0 { // uninitialized non-final
|
||||
ln := len(*ct) - start - n
|
||||
if ln > 0xFF {
|
||||
return 0, fmt.Errorf("genStates: relative block offset too large: %d > 255", ln)
|
||||
}
|
||||
fe.H = uint8(ln)
|
||||
// Find first non-final strings with same byte as current entry.
|
||||
for ; sis[sp].str[0] != fe.L; sp++ {
|
||||
}
|
||||
se := sp + 1
|
||||
for ; se < len(sis) && len(sis[se].str) > 1 && sis[se].str[0] == fe.L; se++ {
|
||||
}
|
||||
sl := sis[sp:se]
|
||||
sp = se
|
||||
for i, si := range sl {
|
||||
sl[i].str = si.str[1:]
|
||||
}
|
||||
nn, err := genStates(ct, sl)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
fe.N = uint8(nn)
|
||||
(*ct)[i] = fe
|
||||
}
|
||||
}
|
||||
sort.Sort(entrySort((*ct)[start : start+n]))
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// There may be both a final and non-final entry for a byte if the byte
|
||||
// is implied in a range of matches in the final entry.
|
||||
// We need to ensure that the non-final entry comes first in that case.
|
||||
type entrySort colltab.ContractTrieSet
|
||||
|
||||
func (fe entrySort) Len() int { return len(fe) }
|
||||
func (fe entrySort) Swap(i, j int) { fe[i], fe[j] = fe[j], fe[i] }
|
||||
func (fe entrySort) Less(i, j int) bool {
|
||||
return fe[i].L > fe[j].L
|
||||
}
|
||||
|
||||
// stridx is used for sorting suffixes and their associated offsets.
|
||||
type stridx struct {
|
||||
str string
|
||||
index int
|
||||
}
|
||||
|
||||
// For computing the offsets, we first sort by size, and then by string.
|
||||
// This ensures that strings that only differ in the last byte by 1
|
||||
// are sorted consecutively in increasing order such that they can
|
||||
// be packed as a range in a final ctEntry.
|
||||
type offsetSort []stridx
|
||||
|
||||
func (si offsetSort) Len() int { return len(si) }
|
||||
func (si offsetSort) Swap(i, j int) { si[i], si[j] = si[j], si[i] }
|
||||
func (si offsetSort) Less(i, j int) bool {
|
||||
if len(si[i].str) != len(si[j].str) {
|
||||
return len(si[i].str) > len(si[j].str)
|
||||
}
|
||||
return si[i].str < si[j].str
|
||||
}
|
||||
|
||||
// For indexing, we want to ensure that strings are sorted in string order, where
|
||||
// for strings with the same prefix, we put longer strings before shorter ones.
|
||||
type genidxSort []stridx
|
||||
|
||||
func (si genidxSort) Len() int { return len(si) }
|
||||
func (si genidxSort) Swap(i, j int) { si[i], si[j] = si[j], si[i] }
|
||||
func (si genidxSort) Less(i, j int) bool {
|
||||
if strings.HasPrefix(si[j].str, si[i].str) {
|
||||
return false
|
||||
}
|
||||
if strings.HasPrefix(si[i].str, si[j].str) {
|
||||
return true
|
||||
}
|
||||
return si[i].str < si[j].str
|
||||
}
|
||||
|
||||
// lookup matches the longest suffix in str and returns the associated offset
|
||||
// and the number of bytes consumed.
|
||||
func lookup(ct *colltab.ContractTrieSet, h ctHandle, str []byte) (index, ns int) {
|
||||
states := (*ct)[h.index:]
|
||||
p := 0
|
||||
n := h.n
|
||||
for i := 0; i < n && p < len(str); {
|
||||
e := states[i]
|
||||
c := str[p]
|
||||
if c >= e.L {
|
||||
if e.L == c {
|
||||
p++
|
||||
if e.I != noIndex {
|
||||
index, ns = int(e.I), p
|
||||
}
|
||||
if e.N != final {
|
||||
// set to new state
|
||||
i, states, n = 0, states[int(e.H)+n:], int(e.N)
|
||||
} else {
|
||||
return
|
||||
}
|
||||
continue
|
||||
} else if e.N == final && c <= e.H {
|
||||
p++
|
||||
return int(c-e.L) + int(e.I), p
|
||||
}
|
||||
}
|
||||
i++
|
||||
}
|
||||
return
|
||||
}
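// A rough usage sketch combining appendTrie and lookup, using a toy suffix set
// (the resulting indices follow from the index assignment in appendTrie):
//
//	var cts colltab.ContractTrieSet
//	h, _ := appendTrie(&cts, []string{"b", "c"}) // suffixes for one starter rune
//	i, n := lookup(&cts, h, []byte("b"))         // i == 1, n == 1
//	i, n = lookup(&cts, h, []byte("c"))          // i == 2, n == 1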
|
||||
|
||||
// print writes the contractTrieSet t as compilable Go code to w. It returns
|
||||
// the total number of bytes written and the size of the resulting data structure in bytes.
|
||||
func print(t *colltab.ContractTrieSet, w io.Writer, name string) (n, size int, err error) {
|
||||
update3 := func(nn, sz int, e error) {
|
||||
n += nn
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
size += sz
|
||||
}
|
||||
update2 := func(nn int, e error) { update3(nn, 0, e) }
|
||||
|
||||
update3(printArray(*t, w, name))
|
||||
update2(fmt.Fprintf(w, "var %sContractTrieSet = ", name))
|
||||
update3(printStruct(*t, w, name))
|
||||
update2(fmt.Fprintln(w))
|
||||
return
|
||||
}
|
||||
|
||||
func printArray(ct colltab.ContractTrieSet, w io.Writer, name string) (n, size int, err error) {
|
||||
p := func(f string, a ...interface{}) {
|
||||
nn, e := fmt.Fprintf(w, f, a...)
|
||||
n += nn
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
size = len(ct) * 4
|
||||
p("// %sCTEntries: %d entries, %d bytes\n", name, len(ct), size)
|
||||
p("var %sCTEntries = [%d]struct{L,H,N,I uint8}{\n", name, len(ct))
|
||||
for _, fe := range ct {
|
||||
p("\t{0x%X, 0x%X, %d, %d},\n", fe.L, fe.H, fe.N, fe.I)
|
||||
}
|
||||
p("}\n")
|
||||
return
|
||||
}
|
||||
|
||||
func printStruct(ct colltab.ContractTrieSet, w io.Writer, name string) (n, size int, err error) {
|
||||
n, err = fmt.Fprintf(w, "colltab.ContractTrieSet( %sCTEntries[:] )", name)
|
||||
size = int(reflect.TypeOf(ct).Size())
|
||||
return
|
||||
}
|
393
vendor/golang.org/x/text/collate/build/order.go
generated
vendored
@ -1,393 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/text/internal/colltab"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
type logicalAnchor int
|
||||
|
||||
const (
|
||||
firstAnchor logicalAnchor = -1
|
||||
noAnchor = 0
|
||||
lastAnchor = 1
|
||||
)
|
||||
|
||||
// entry is used to keep track of a single entry in the collation element table
|
||||
// during building. Examples of entries can be found in the Default Unicode
|
||||
// Collation Element Table.
|
||||
// See http://www.unicode.org/Public/UCA/6.0.0/allkeys.txt.
|
||||
type entry struct {
|
||||
str string // same as string(runes)
|
||||
runes []rune
|
||||
elems []rawCE // the collation elements
|
||||
extend string // weights of extend to be appended to elems
|
||||
before bool // weights relative to next instead of previous.
|
||||
lock bool // entry is used in extension and can no longer be moved.
|
||||
|
||||
// prev, next, and level are used to keep track of tailorings.
|
||||
prev, next *entry
|
||||
level colltab.Level // next differs at this level
|
||||
skipRemove bool // do not unlink when removed
|
||||
|
||||
decompose bool // can use NFKD decomposition to generate elems
|
||||
exclude bool // do not include in table
|
||||
implicit bool // derived, is not included in the list
|
||||
modified bool // entry was modified in tailoring
|
||||
logical logicalAnchor
|
||||
|
||||
expansionIndex int // used to store index into expansion table
|
||||
contractionHandle ctHandle
|
||||
contractionIndex int // index into contraction elements
|
||||
}
|
||||
|
||||
func (e *entry) String() string {
|
||||
return fmt.Sprintf("%X (%q) -> %X (ch:%x; ci:%d, ei:%d)",
|
||||
e.runes, e.str, e.elems, e.contractionHandle, e.contractionIndex, e.expansionIndex)
|
||||
}
|
||||
|
||||
func (e *entry) skip() bool {
|
||||
return e.contraction()
|
||||
}
|
||||
|
||||
func (e *entry) expansion() bool {
|
||||
return !e.decompose && len(e.elems) > 1
|
||||
}
|
||||
|
||||
func (e *entry) contraction() bool {
|
||||
return len(e.runes) > 1
|
||||
}
|
||||
|
||||
func (e *entry) contractionStarter() bool {
|
||||
return e.contractionHandle.n != 0
|
||||
}
|
||||
|
||||
// nextIndexed gets the next entry that needs to be stored in the table.
|
||||
// It returns the entry and the collation level at which the next entry differs
|
||||
// from the current entry.
|
||||
// Entries that can be explicitly derived and logical reset positions are
|
||||
// examples of entries that will not be indexed.
|
||||
func (e *entry) nextIndexed() (*entry, colltab.Level) {
|
||||
level := e.level
|
||||
for e = e.next; e != nil && (e.exclude || len(e.elems) == 0); e = e.next {
|
||||
if e.level < level {
|
||||
level = e.level
|
||||
}
|
||||
}
|
||||
return e, level
|
||||
}
|
||||
|
||||
// remove unlinks entry e from the sorted chain and clears the collation
|
||||
// elements. e may not be at the front or end of the list. This should always
|
||||
// be the case, as the front and end of the list are always logical anchors,
|
||||
// which may not be removed.
|
||||
func (e *entry) remove() {
|
||||
if e.logical != noAnchor {
|
||||
log.Fatalf("may not remove anchor %q", e.str)
|
||||
}
|
||||
// TODO: need to set e.prev.level to e.level if e.level is smaller?
|
||||
e.elems = nil
|
||||
if !e.skipRemove {
|
||||
if e.prev != nil {
|
||||
e.prev.next = e.next
|
||||
}
|
||||
if e.next != nil {
|
||||
e.next.prev = e.prev
|
||||
}
|
||||
}
|
||||
e.skipRemove = false
|
||||
}
|
||||
|
||||
// insertAfter inserts n after e.
|
||||
func (e *entry) insertAfter(n *entry) {
|
||||
if e == n {
|
||||
panic("e == anchor")
|
||||
}
|
||||
if e == nil {
|
||||
panic("unexpected nil anchor")
|
||||
}
|
||||
n.remove()
|
||||
n.decompose = false // redo decomposition test
|
||||
|
||||
n.next = e.next
|
||||
n.prev = e
|
||||
if e.next != nil {
|
||||
e.next.prev = n
|
||||
}
|
||||
e.next = n
|
||||
}
|
||||
|
||||
// insertBefore inserts n before e.
|
||||
func (e *entry) insertBefore(n *entry) {
|
||||
if e == n {
|
||||
panic("e == anchor")
|
||||
}
|
||||
if e == nil {
|
||||
panic("unexpected nil anchor")
|
||||
}
|
||||
n.remove()
|
||||
n.decompose = false // redo decomposition test
|
||||
|
||||
n.prev = e.prev
|
||||
n.next = e
|
||||
if e.prev != nil {
|
||||
e.prev.next = n
|
||||
}
|
||||
e.prev = n
|
||||
}
|
||||
|
||||
func (e *entry) encodeBase() (ce uint32, err error) {
|
||||
switch {
|
||||
case e.expansion():
|
||||
ce, err = makeExpandIndex(e.expansionIndex)
|
||||
default:
|
||||
if e.decompose {
|
||||
log.Fatal("decompose should be handled elsewhere")
|
||||
}
|
||||
ce, err = makeCE(e.elems[0])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (e *entry) encode() (ce uint32, err error) {
|
||||
if e.skip() {
|
||||
log.Fatal("cannot build colElem for entry that should be skipped")
|
||||
}
|
||||
switch {
|
||||
case e.decompose:
|
||||
t1 := e.elems[0].w[2]
|
||||
t2 := 0
|
||||
if len(e.elems) > 1 {
|
||||
t2 = e.elems[1].w[2]
|
||||
}
|
||||
ce, err = makeDecompose(t1, t2)
|
||||
case e.contractionStarter():
|
||||
ce, err = makeContractIndex(e.contractionHandle, e.contractionIndex)
|
||||
default:
|
||||
if len(e.runes) > 1 {
|
||||
log.Fatal("colElem: contractions are handled in contraction trie")
|
||||
}
|
||||
ce, err = e.encodeBase()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// entryLess returns true if a sorts before b and false otherwise.
|
||||
func entryLess(a, b *entry) bool {
|
||||
if res, _ := compareWeights(a.elems, b.elems); res != 0 {
|
||||
return res == -1
|
||||
}
|
||||
if a.logical != noAnchor {
|
||||
return a.logical == firstAnchor
|
||||
}
|
||||
if b.logical != noAnchor {
|
||||
return b.logical == lastAnchor
|
||||
}
|
||||
return a.str < b.str
|
||||
}
|
||||
|
||||
type sortedEntries []*entry
|
||||
|
||||
func (s sortedEntries) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s sortedEntries) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s sortedEntries) Less(i, j int) bool {
|
||||
return entryLess(s[i], s[j])
|
||||
}
|
||||
|
||||
type ordering struct {
|
||||
id string
|
||||
entryMap map[string]*entry
|
||||
ordered []*entry
|
||||
handle *trieHandle
|
||||
}
|
||||
|
||||
// insert inserts e into both entryMap and ordered.
|
||||
// Note that insert simply appends e to ordered. To reattain a sorted
|
||||
// order, o.sort() should be called.
|
||||
func (o *ordering) insert(e *entry) {
|
||||
if e.logical == noAnchor {
|
||||
o.entryMap[e.str] = e
|
||||
} else {
|
||||
// Use key format as used in UCA rules.
|
||||
o.entryMap[fmt.Sprintf("[%s]", e.str)] = e
|
||||
// Also add index entry for XML format.
|
||||
o.entryMap[fmt.Sprintf("<%s/>", strings.Replace(e.str, " ", "_", -1))] = e
|
||||
}
|
||||
o.ordered = append(o.ordered, e)
|
||||
}
|
||||
|
||||
// newEntry creates a new entry for the given info and inserts it into
|
||||
// the index.
|
||||
func (o *ordering) newEntry(s string, ces []rawCE) *entry {
|
||||
e := &entry{
|
||||
runes: []rune(s),
|
||||
elems: ces,
|
||||
str: s,
|
||||
}
|
||||
o.insert(e)
|
||||
return e
|
||||
}
|
||||
|
||||
// find looks up and returns the entry for the given string.
|
||||
// It returns nil if str is not in the index and if an implicit value
|
||||
// cannot be derived, that is, if str represents more than one rune.
|
||||
func (o *ordering) find(str string) *entry {
|
||||
e := o.entryMap[str]
|
||||
if e == nil {
|
||||
r := []rune(str)
|
||||
if len(r) == 1 {
|
||||
const (
|
||||
firstHangul = 0xAC00
|
||||
lastHangul = 0xD7A3
|
||||
)
|
||||
if r[0] >= firstHangul && r[0] <= lastHangul {
|
||||
ce := []rawCE{}
|
||||
nfd := norm.NFD.String(str)
|
||||
for _, r := range nfd {
|
||||
ce = append(ce, o.find(string(r)).elems...)
|
||||
}
|
||||
e = o.newEntry(nfd, ce)
|
||||
} else {
|
||||
e = o.newEntry(string(r[0]), []rawCE{
|
||||
{w: []int{
|
||||
implicitPrimary(r[0]),
|
||||
defaultSecondary,
|
||||
defaultTertiary,
|
||||
int(r[0]),
|
||||
},
|
||||
},
|
||||
})
|
||||
e.modified = true
|
||||
}
|
||||
e.exclude = true // do not index implicits
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// makeRootOrdering returns a newly initialized ordering value and populates
|
||||
// it with a set of logical reset points that can be used as anchors.
|
||||
// The anchors first_tertiary_ignorable and __END__ will always sort at
|
||||
// the beginning and end, respectively. This means that prev and next are non-nil
|
||||
// for any indexed entry.
|
||||
func makeRootOrdering() ordering {
|
||||
const max = unicode.MaxRune
|
||||
o := ordering{
|
||||
entryMap: make(map[string]*entry),
|
||||
}
|
||||
insert := func(typ logicalAnchor, s string, ce []int) {
|
||||
e := &entry{
|
||||
elems: []rawCE{{w: ce}},
|
||||
str: s,
|
||||
exclude: true,
|
||||
logical: typ,
|
||||
}
|
||||
o.insert(e)
|
||||
}
|
||||
insert(firstAnchor, "first tertiary ignorable", []int{0, 0, 0, 0})
|
||||
insert(lastAnchor, "last tertiary ignorable", []int{0, 0, 0, max})
|
||||
insert(lastAnchor, "last primary ignorable", []int{0, defaultSecondary, defaultTertiary, max})
|
||||
insert(lastAnchor, "last non ignorable", []int{maxPrimary, defaultSecondary, defaultTertiary, max})
|
||||
insert(lastAnchor, "__END__", []int{1 << maxPrimaryBits, defaultSecondary, defaultTertiary, max})
|
||||
return o
|
||||
}
|
||||
|
||||
// patchForInsert eliminates entries from the list with more than one collation element.
|
||||
// The next and prev fields of the eliminated entries still point to appropriate
|
||||
// values in the newly created list.
|
||||
// It requires that sort has been called.
|
||||
func (o *ordering) patchForInsert() {
|
||||
for i := 0; i < len(o.ordered)-1; {
|
||||
e := o.ordered[i]
|
||||
lev := e.level
|
||||
n := e.next
|
||||
for ; n != nil && len(n.elems) > 1; n = n.next {
|
||||
if n.level < lev {
|
||||
lev = n.level
|
||||
}
|
||||
n.skipRemove = true
|
||||
}
|
||||
for ; o.ordered[i] != n; i++ {
|
||||
o.ordered[i].level = lev
|
||||
o.ordered[i].next = n
|
||||
o.ordered[i+1].prev = e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// clone copies all entries of o into a new ordering value.
|
||||
func (o *ordering) clone() *ordering {
|
||||
o.sort()
|
||||
oo := ordering{
|
||||
entryMap: make(map[string]*entry),
|
||||
}
|
||||
for _, e := range o.ordered {
|
||||
ne := &entry{
|
||||
runes: e.runes,
|
||||
elems: e.elems,
|
||||
str: e.str,
|
||||
decompose: e.decompose,
|
||||
exclude: e.exclude,
|
||||
logical: e.logical,
|
||||
}
|
||||
oo.insert(ne)
|
||||
}
|
||||
oo.sort() // link all ordering.
|
||||
oo.patchForInsert()
|
||||
return &oo
|
||||
}
|
||||
|
||||
// front returns the first entry to be indexed.
|
||||
// It assumes that sort() has been called.
|
||||
func (o *ordering) front() *entry {
|
||||
e := o.ordered[0]
|
||||
if e.prev != nil {
|
||||
log.Panicf("unexpected first entry: %v", e)
|
||||
}
|
||||
// The first entry is always a logical position, which should not be indexed.
|
||||
e, _ = e.nextIndexed()
|
||||
return e
|
||||
}
|
||||
|
||||
// sort sorts all entries based on their collation elements and initializes
|
||||
// the prev, next, and level fields accordingly.
|
||||
func (o *ordering) sort() {
|
||||
sort.Sort(sortedEntries(o.ordered))
|
||||
l := o.ordered
|
||||
for i := 1; i < len(l); i++ {
|
||||
k := i - 1
|
||||
l[k].next = l[i]
|
||||
_, l[k].level = compareWeights(l[k].elems, l[i].elems)
|
||||
l[i].prev = l[k]
|
||||
}
|
||||
}
|
||||
|
||||
// genColElems generates a collation element array from the runes in str. This
|
||||
// assumes that all collation elements have already been added to the Builder.
|
||||
func (o *ordering) genColElems(str string) []rawCE {
|
||||
elems := []rawCE{}
|
||||
for _, r := range []rune(str) {
|
||||
for _, ce := range o.find(string(r)).elems {
|
||||
if ce.w[0] != 0 || ce.w[1] != 0 || ce.w[2] != 0 {
|
||||
elems = append(elems, ce)
|
||||
}
|
||||
}
|
||||
}
|
||||
return elems
|
||||
}
|
81
vendor/golang.org/x/text/collate/build/table.go
generated
vendored
@ -1,81 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/text/internal/colltab"
|
||||
)
|
||||
|
||||
// table is an intermediate structure that roughly resembles the table in collate.
|
||||
type table struct {
|
||||
colltab.Table
|
||||
trie trie
|
||||
root *trieHandle
|
||||
}
|
||||
|
||||
// print writes the table as Go compilable code to w. It prefixes the
|
||||
// variable names with name. It returns the number of bytes written
|
||||
// and the size of the resulting table.
|
||||
func (t *table) fprint(w io.Writer, name string) (n, size int, err error) {
|
||||
update := func(nn, sz int, e error) {
|
||||
n += nn
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
size += sz
|
||||
}
|
||||
// Write arrays needed for the structure.
|
||||
update(printColElems(w, t.ExpandElem, name+"ExpandElem"))
|
||||
update(printColElems(w, t.ContractElem, name+"ContractElem"))
|
||||
update(t.trie.printArrays(w, name))
|
||||
update(printArray(t.ContractTries, w, name))
|
||||
|
||||
nn, e := fmt.Fprintf(w, "// Total size of %sTable is %d bytes\n", name, size)
|
||||
update(nn, 0, e)
|
||||
return
|
||||
}
|
||||
|
||||
func (t *table) fprintIndex(w io.Writer, h *trieHandle, id string) (n int, err error) {
|
||||
p := func(f string, a ...interface{}) {
|
||||
nn, e := fmt.Fprintf(w, f, a...)
|
||||
n += nn
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
p("\t{ // %s\n", id)
|
||||
p("\t\tlookupOffset: 0x%x,\n", h.lookupStart)
|
||||
p("\t\tvaluesOffset: 0x%x,\n", h.valueStart)
|
||||
p("\t},\n")
|
||||
return
|
||||
}
|
||||
|
||||
func printColElems(w io.Writer, a []uint32, name string) (n, sz int, err error) {
|
||||
p := func(f string, a ...interface{}) {
|
||||
nn, e := fmt.Fprintf(w, f, a...)
|
||||
n += nn
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
sz = len(a) * int(reflect.TypeOf(uint32(0)).Size())
|
||||
p("// %s: %d entries, %d bytes\n", name, len(a), sz)
|
||||
p("var %s = [%d]uint32 {", name, len(a))
|
||||
for i, c := range a {
|
||||
switch {
|
||||
case i%64 == 0:
|
||||
p("\n\t// Block %d, offset 0x%x\n", i/64, i)
|
||||
case (i%64)%6 == 0:
|
||||
p("\n\t")
|
||||
}
|
||||
p("0x%.8X, ", c)
|
||||
}
|
||||
p("\n}\n\n")
|
||||
return
|
||||
}
|
290
vendor/golang.org/x/text/collate/build/trie.go
generated
vendored
@ -1,290 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The trie in this file is used to associate the first full character
|
||||
// in a UTF-8 string to a collation element.
|
||||
// All but the last byte in a UTF-8 byte sequence are
|
||||
// used to look up offsets in the index table to be used for the next byte.
|
||||
// The last byte is used to index into a table of collation elements.
|
||||
// This file contains the code for the generation of the trie.
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
const (
|
||||
blockSize = 64
|
||||
blockOffset = 2 // Subtract 2 blocks to compensate for the 0x80 added to continuation bytes.
|
||||
)
|
||||
|
||||
type trieHandle struct {
|
||||
lookupStart uint16 // offset in table for first byte
|
||||
valueStart uint16 // offset in table for first byte
|
||||
}
|
||||
|
||||
type trie struct {
|
||||
index []uint16
|
||||
values []uint32
|
||||
}
|
||||
|
||||
// trieNode is the intermediate trie structure used for generating a trie.
|
||||
type trieNode struct {
|
||||
index []*trieNode
|
||||
value []uint32
|
||||
b byte
|
||||
refValue uint16
|
||||
refIndex uint16
|
||||
}
|
||||
|
||||
func newNode() *trieNode {
|
||||
return &trieNode{
|
||||
index: make([]*trieNode, 64),
|
||||
value: make([]uint32, 128), // root node size is 128 instead of 64
|
||||
}
|
||||
}
|
||||
|
||||
func (n *trieNode) isInternal() bool {
|
||||
return n.value != nil
|
||||
}
|
||||
|
||||
func (n *trieNode) insert(r rune, value uint32) {
|
||||
const maskx = 0x3F // mask out two most-significant bits
|
||||
str := string(r)
|
||||
if len(str) == 1 {
|
||||
n.value[str[0]] = value
|
||||
return
|
||||
}
|
||||
for i := 0; i < len(str)-1; i++ {
|
||||
b := str[i] & maskx
|
||||
if n.index == nil {
|
||||
n.index = make([]*trieNode, blockSize)
|
||||
}
|
||||
nn := n.index[b]
|
||||
if nn == nil {
|
||||
nn = &trieNode{}
|
||||
nn.b = b
|
||||
n.index[b] = nn
|
||||
}
|
||||
n = nn
|
||||
}
|
||||
if n.value == nil {
|
||||
n.value = make([]uint32, blockSize)
|
||||
}
|
||||
b := str[len(str)-1] & maskx
|
||||
n.value[b] = value
|
||||
}
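// Sketch of how a two-byte rune lands in this structure: for 'é'
// (UTF-8 bytes 0xC3 0xA9), the lead byte selects index[0xC3&maskx] == index[0x03]
// and the final byte stores the value at value[0xA9&maskx] == value[0x29] of
// that child node:
//
//	n := newNode()
//	n.insert('é', 0x12345678)
//	_ = n.index[0x03].value[0x29] // == 0x12345678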
|
||||
|
||||
type trieBuilder struct {
|
||||
t *trie
|
||||
|
||||
roots []*trieHandle
|
||||
|
||||
lookupBlocks []*trieNode
|
||||
valueBlocks []*trieNode
|
||||
|
||||
lookupBlockIdx map[uint32]*trieNode
|
||||
valueBlockIdx map[uint32]*trieNode
|
||||
}
|
||||
|
||||
func newTrieBuilder() *trieBuilder {
|
||||
index := &trieBuilder{}
|
||||
index.lookupBlocks = make([]*trieNode, 0)
|
||||
index.valueBlocks = make([]*trieNode, 0)
|
||||
index.lookupBlockIdx = make(map[uint32]*trieNode)
|
||||
index.valueBlockIdx = make(map[uint32]*trieNode)
|
||||
// The third nil is the default null block. The other two blocks
|
||||
// are used to guarantee an offset of at least 3 for each block.
|
||||
index.lookupBlocks = append(index.lookupBlocks, nil, nil, nil)
|
||||
index.t = &trie{}
|
||||
return index
|
||||
}
|
||||
|
||||
func (b *trieBuilder) computeOffsets(n *trieNode) *trieNode {
|
||||
hasher := fnv.New32()
|
||||
if n.index != nil {
|
||||
for i, nn := range n.index {
|
||||
var vi, vv uint16
|
||||
if nn != nil {
|
||||
nn = b.computeOffsets(nn)
|
||||
n.index[i] = nn
|
||||
vi = nn.refIndex
|
||||
vv = nn.refValue
|
||||
}
|
||||
hasher.Write([]byte{byte(vi >> 8), byte(vi)})
|
||||
hasher.Write([]byte{byte(vv >> 8), byte(vv)})
|
||||
}
|
||||
h := hasher.Sum32()
|
||||
nn, ok := b.lookupBlockIdx[h]
|
||||
if !ok {
|
||||
n.refIndex = uint16(len(b.lookupBlocks)) - blockOffset
|
||||
b.lookupBlocks = append(b.lookupBlocks, n)
|
||||
b.lookupBlockIdx[h] = n
|
||||
} else {
|
||||
n = nn
|
||||
}
|
||||
} else {
|
||||
for _, v := range n.value {
|
||||
hasher.Write([]byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)})
|
||||
}
|
||||
h := hasher.Sum32()
|
||||
nn, ok := b.valueBlockIdx[h]
|
||||
if !ok {
|
||||
n.refValue = uint16(len(b.valueBlocks)) - blockOffset
|
||||
n.refIndex = n.refValue
|
||||
b.valueBlocks = append(b.valueBlocks, n)
|
||||
b.valueBlockIdx[h] = n
|
||||
} else {
|
||||
n = nn
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (b *trieBuilder) addStartValueBlock(n *trieNode) uint16 {
|
||||
hasher := fnv.New32()
|
||||
for _, v := range n.value[:2*blockSize] {
|
||||
hasher.Write([]byte{byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)})
|
||||
}
|
||||
h := hasher.Sum32()
|
||||
nn, ok := b.valueBlockIdx[h]
|
||||
if !ok {
|
||||
n.refValue = uint16(len(b.valueBlocks))
|
||||
n.refIndex = n.refValue
|
||||
b.valueBlocks = append(b.valueBlocks, n)
|
||||
// Add a dummy block to accommodate the double block size.
|
||||
b.valueBlocks = append(b.valueBlocks, nil)
|
||||
b.valueBlockIdx[h] = n
|
||||
} else {
|
||||
n = nn
|
||||
}
|
||||
return n.refValue
|
||||
}
|
||||
|
||||
func genValueBlock(t *trie, n *trieNode) {
|
||||
if n != nil {
|
||||
for _, v := range n.value {
|
||||
t.values = append(t.values, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genLookupBlock(t *trie, n *trieNode) {
|
||||
for _, nn := range n.index {
|
||||
v := uint16(0)
|
||||
if nn != nil {
|
||||
if n.index != nil {
|
||||
v = nn.refIndex
|
||||
} else {
|
||||
v = nn.refValue
|
||||
}
|
||||
}
|
||||
t.index = append(t.index, v)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *trieBuilder) addTrie(n *trieNode) *trieHandle {
|
||||
h := &trieHandle{}
|
||||
b.roots = append(b.roots, h)
|
||||
h.valueStart = b.addStartValueBlock(n)
|
||||
if len(b.roots) == 1 {
|
||||
// We insert a null block after the first start value block.
|
||||
// This ensures that continuation bytes of UTF-8 sequences of length
|
||||
// greater than 2 will automatically hit a null block if there
|
||||
// was an undefined entry.
|
||||
b.valueBlocks = append(b.valueBlocks, nil)
|
||||
}
|
||||
n = b.computeOffsets(n)
|
||||
// Offset by one extra block as the first byte starts at 0xC0 instead of 0x80.
|
||||
h.lookupStart = n.refIndex - 1
|
||||
return h
|
||||
}
|
||||
|
||||
// generate generates and returns the trie for n.
|
||||
func (b *trieBuilder) generate() (t *trie, err error) {
|
||||
t = b.t
|
||||
if len(b.valueBlocks) >= 1<<16 {
|
||||
return nil, fmt.Errorf("maximum number of value blocks exceeded (%d > %d)", len(b.valueBlocks), 1<<16)
|
||||
}
|
||||
if len(b.lookupBlocks) >= 1<<16 {
|
||||
return nil, fmt.Errorf("maximum number of lookup blocks exceeded (%d > %d)", len(b.lookupBlocks), 1<<16)
|
||||
}
|
||||
genValueBlock(t, b.valueBlocks[0])
|
||||
genValueBlock(t, &trieNode{value: make([]uint32, 64)})
|
||||
for i := 2; i < len(b.valueBlocks); i++ {
|
||||
genValueBlock(t, b.valueBlocks[i])
|
||||
}
|
||||
n := &trieNode{index: make([]*trieNode, 64)}
|
||||
genLookupBlock(t, n)
|
||||
genLookupBlock(t, n)
|
||||
genLookupBlock(t, n)
|
||||
for i := 3; i < len(b.lookupBlocks); i++ {
|
||||
genLookupBlock(t, b.lookupBlocks[i])
|
||||
}
|
||||
return b.t, nil
|
||||
}
|
||||
|
||||
func (t *trie) printArrays(w io.Writer, name string) (n, size int, err error) {
|
||||
p := func(f string, a ...interface{}) {
|
||||
nn, e := fmt.Fprintf(w, f, a...)
|
||||
n += nn
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
nv := len(t.values)
|
||||
p("// %sValues: %d entries, %d bytes\n", name, nv, nv*4)
|
||||
p("// Block 2 is the null block.\n")
|
||||
p("var %sValues = [%d]uint32 {", name, nv)
|
||||
var printnewline bool
|
||||
for i, v := range t.values {
|
||||
if i%blockSize == 0 {
|
||||
p("\n\t// Block %#x, offset %#x", i/blockSize, i)
|
||||
}
|
||||
if i%4 == 0 {
|
||||
printnewline = true
|
||||
}
|
||||
if v != 0 {
|
||||
if printnewline {
|
||||
p("\n\t")
|
||||
printnewline = false
|
||||
}
|
||||
p("%#04x:%#08x, ", i, v)
|
||||
}
|
||||
}
|
||||
p("\n}\n\n")
|
||||
ni := len(t.index)
|
||||
p("// %sLookup: %d entries, %d bytes\n", name, ni, ni*2)
|
||||
p("// Block 0 is the null block.\n")
|
||||
p("var %sLookup = [%d]uint16 {", name, ni)
|
||||
printnewline = false
|
||||
for i, v := range t.index {
|
||||
if i%blockSize == 0 {
|
||||
p("\n\t// Block %#x, offset %#x", i/blockSize, i)
|
||||
}
|
||||
if i%8 == 0 {
|
||||
printnewline = true
|
||||
}
|
||||
if v != 0 {
|
||||
if printnewline {
|
||||
p("\n\t")
|
||||
printnewline = false
|
||||
}
|
||||
p("%#03x:%#02x, ", i, v)
|
||||
}
|
||||
}
|
||||
p("\n}\n\n")
|
||||
return n, nv*4 + ni*2, err
|
||||
}
|
||||
|
||||
func (t *trie) printStruct(w io.Writer, handle *trieHandle, name string) (n, sz int, err error) {
|
||||
const msg = "trie{ %sLookup[%d:], %sValues[%d:], %sLookup[:], %sValues[:]}"
|
||||
n, err = fmt.Fprintf(w, msg, name, handle.lookupStart*blockSize, name, handle.valueStart*blockSize, name, name)
|
||||
sz += int(reflect.TypeOf(trie{}).Size())
|
||||
return
|
||||
}
|
403
vendor/golang.org/x/text/collate/collate.go
generated
vendored
@ -1,403 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// TODO: remove hard-coded versions when we have implemented fractional weights.
|
||||
// The current implementation is incompatible with later CLDR versions.
|
||||
//go:generate go run maketables.go -cldr=23 -unicode=6.2.0
|
||||
|
||||
// Package collate contains types for comparing and sorting Unicode strings
|
||||
// according to a given collation order.
|
||||
package collate // import "golang.org/x/text/collate"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/colltab"
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
// Collator provides functionality for comparing strings for a given
|
||||
// collation order.
|
||||
type Collator struct {
|
||||
options
|
||||
|
||||
sorter sorter
|
||||
|
||||
_iter [2]iter
|
||||
}
|
||||
|
||||
func (c *Collator) iter(i int) *iter {
|
||||
// TODO: evaluate performance for making the second iterator optional.
|
||||
return &c._iter[i]
|
||||
}
|
||||
|
||||
// Supported returns the list of languages for which collating differs from its parent.
|
||||
func Supported() []language.Tag {
|
||||
// TODO: use language.Coverage instead.
|
||||
|
||||
t := make([]language.Tag, len(tags))
|
||||
copy(t, tags)
|
||||
return t
|
||||
}
|
||||
|
||||
func init() {
|
||||
ids := strings.Split(availableLocales, ",")
|
||||
tags = make([]language.Tag, len(ids))
|
||||
for i, s := range ids {
|
||||
tags[i] = language.Raw.MustParse(s)
|
||||
}
|
||||
}
|
||||
|
||||
var tags []language.Tag
|
||||
|
||||
// New returns a new Collator initialized for the given locale.
|
||||
func New(t language.Tag, o ...Option) *Collator {
|
||||
index := colltab.MatchLang(t, tags)
|
||||
c := newCollator(getTable(locales[index]))
|
||||
|
||||
// Set options from the user-supplied tag.
|
||||
c.setFromTag(t)
|
||||
|
||||
// Set the user-supplied options.
|
||||
c.setOptions(o)
|
||||
|
||||
c.init()
|
||||
return c
|
||||
}
|
||||
|
||||
// NewFromTable returns a new Collator for the given Weighter.
|
||||
func NewFromTable(w colltab.Weighter, o ...Option) *Collator {
|
||||
c := newCollator(w)
|
||||
c.setOptions(o)
|
||||
c.init()
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Collator) init() {
|
||||
if c.numeric {
|
||||
c.t = colltab.NewNumericWeighter(c.t)
|
||||
}
|
||||
c._iter[0].init(c)
|
||||
c._iter[1].init(c)
|
||||
}
|
||||
|
||||
// Buffer holds keys generated by Key and KeyString.
|
||||
type Buffer struct {
|
||||
buf [4096]byte
|
||||
key []byte
|
||||
}
|
||||
|
||||
func (b *Buffer) init() {
|
||||
if b.key == nil {
|
||||
b.key = b.buf[:0]
|
||||
}
|
||||
}
|
||||
|
||||
// Reset clears the buffer from previous results generated by Key and KeyString.
|
||||
func (b *Buffer) Reset() {
|
||||
b.key = b.key[:0]
|
||||
}
|
||||
|
||||
// Compare returns an integer comparing the two byte slices.
|
||||
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
|
||||
func (c *Collator) Compare(a, b []byte) int {
|
||||
// TODO: skip identical prefixes once we have a fast way to detect if a rune is
|
||||
// part of a contraction. This would lead to roughly a 10% speedup for the colcmp regtest.
|
||||
c.iter(0).SetInput(a)
|
||||
c.iter(1).SetInput(b)
|
||||
if res := c.compare(); res != 0 {
|
||||
return res
|
||||
}
|
||||
if !c.ignore[colltab.Identity] {
|
||||
return bytes.Compare(a, b)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// CompareString returns an integer comparing the two strings.
|
||||
// The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
|
||||
func (c *Collator) CompareString(a, b string) int {
|
||||
// TODO: skip identical prefixes once we have a fast way to detect if a rune is
|
||||
// part of a contraction. This would lead to roughly a 10% speedup for the colcmp regtest.
|
||||
c.iter(0).SetInputString(a)
|
||||
c.iter(1).SetInputString(b)
|
||||
if res := c.compare(); res != 0 {
|
||||
return res
|
||||
}
|
||||
if !c.ignore[colltab.Identity] {
|
||||
if a < b {
|
||||
return -1
|
||||
} else if a > b {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
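Outside of this vendored copy, the comparison API above is typically used as in the following minimal sketch (assuming the standard golang.org/x/text import paths):

package main

import (
	"fmt"

	"golang.org/x/text/collate"
	"golang.org/x/text/language"
)

func main() {
	c := collate.New(language.German)
	// Compare and CompareString report -1, 0, or +1, like bytes.Compare.
	fmt.Println(c.CompareString("Äpfel", "Zebra")) // -1 under German collation
}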
|
||||
|
||||
func compareLevel(f func(i *iter) int, a, b *iter) int {
|
||||
a.pce = 0
|
||||
b.pce = 0
|
||||
for {
|
||||
va := f(a)
|
||||
vb := f(b)
|
||||
if va != vb {
|
||||
if va < vb {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
} else if va == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (c *Collator) compare() int {
|
||||
ia, ib := c.iter(0), c.iter(1)
|
||||
// Process primary level
|
||||
if c.alternate != altShifted {
|
||||
// TODO: implement script reordering
|
||||
if res := compareLevel((*iter).nextPrimary, ia, ib); res != 0 {
|
||||
return res
|
||||
}
|
||||
} else {
|
||||
// TODO: handle shifted
|
||||
}
|
||||
if !c.ignore[colltab.Secondary] {
|
||||
f := (*iter).nextSecondary
|
||||
if c.backwards {
|
||||
f = (*iter).prevSecondary
|
||||
}
|
||||
if res := compareLevel(f, ia, ib); res != 0 {
|
||||
return res
|
||||
}
|
||||
}
|
||||
// TODO: special case handling (Danish?)
|
||||
if !c.ignore[colltab.Tertiary] || c.caseLevel {
|
||||
if res := compareLevel((*iter).nextTertiary, ia, ib); res != 0 {
|
||||
return res
|
||||
}
|
||||
if !c.ignore[colltab.Quaternary] {
|
||||
if res := compareLevel((*iter).nextQuaternary, ia, ib); res != 0 {
|
||||
return res
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Key returns the collation key for str.
|
||||
// Passing the buffer buf may avoid memory allocations.
|
||||
// The returned slice will point to an allocation in Buffer and will remain
|
||||
// valid until the next call to buf.Reset().
|
||||
func (c *Collator) Key(buf *Buffer, str []byte) []byte {
|
||||
// See http://www.unicode.org/reports/tr10/#Main_Algorithm for more details.
|
||||
buf.init()
|
||||
return c.key(buf, c.getColElems(str))
|
||||
}
|
||||
|
||||
// KeyFromString returns the collation key for str.
|
||||
// Passing the buffer buf may avoid memory allocations.
|
||||
// The returned slice will point to an allocation in Buffer and will remain
|
||||
// valid until the next call to buf.Reset().
|
||||
func (c *Collator) KeyFromString(buf *Buffer, str string) []byte {
|
||||
// See http://www.unicode.org/reports/tr10/#Main_Algorithm for more details.
|
||||
buf.init()
|
||||
return c.key(buf, c.getColElemsString(str))
|
||||
}
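A hedged sketch of reusing one Buffer across several key computations; the sort keys compare bytewise in the same order that CompareString reports (assumes the same imports as the previous sketch, plus "bytes"):

var buf collate.Buffer
c := collate.New(language.Und)
k1 := c.KeyFromString(&buf, "bar")
k2 := c.KeyFromString(&buf, "baz")
fmt.Println(bytes.Compare(k1, k2)) // -1, matching c.CompareString("bar", "baz")
buf.Reset()                        // invalidates k1 and k2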
|
||||
|
||||
func (c *Collator) key(buf *Buffer, w []colltab.Elem) []byte {
|
||||
processWeights(c.alternate, c.t.Top(), w)
|
||||
kn := len(buf.key)
|
||||
c.keyFromElems(buf, w)
|
||||
return buf.key[kn:]
|
||||
}
|
||||
|
||||
func (c *Collator) getColElems(str []byte) []colltab.Elem {
|
||||
i := c.iter(0)
|
||||
i.SetInput(str)
|
||||
for i.Next() {
|
||||
}
|
||||
return i.Elems
|
||||
}
|
||||
|
||||
func (c *Collator) getColElemsString(str string) []colltab.Elem {
|
||||
i := c.iter(0)
|
||||
i.SetInputString(str)
|
||||
for i.Next() {
|
||||
}
|
||||
return i.Elems
|
||||
}
|
||||
|
||||
type iter struct {
|
||||
wa [512]colltab.Elem
|
||||
|
||||
colltab.Iter
|
||||
pce int
|
||||
}
|
||||
|
||||
func (i *iter) init(c *Collator) {
|
||||
i.Weighter = c.t
|
||||
i.Elems = i.wa[:0]
|
||||
}
|
||||
|
||||
func (i *iter) nextPrimary() int {
|
||||
for {
|
||||
for ; i.pce < i.N; i.pce++ {
|
||||
if v := i.Elems[i.pce].Primary(); v != 0 {
|
||||
i.pce++
|
||||
return v
|
||||
}
|
||||
}
|
||||
if !i.Next() {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
panic("should not reach here")
|
||||
}
|
||||
|
||||
func (i *iter) nextSecondary() int {
|
||||
for ; i.pce < len(i.Elems); i.pce++ {
|
||||
if v := i.Elems[i.pce].Secondary(); v != 0 {
|
||||
i.pce++
|
||||
return v
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (i *iter) prevSecondary() int {
|
||||
for ; i.pce < len(i.Elems); i.pce++ {
|
||||
if v := i.Elems[len(i.Elems)-i.pce-1].Secondary(); v != 0 {
|
||||
i.pce++
|
||||
return v
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (i *iter) nextTertiary() int {
|
||||
for ; i.pce < len(i.Elems); i.pce++ {
|
||||
if v := i.Elems[i.pce].Tertiary(); v != 0 {
|
||||
i.pce++
|
||||
return int(v)
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (i *iter) nextQuaternary() int {
|
||||
for ; i.pce < len(i.Elems); i.pce++ {
|
||||
if v := i.Elems[i.pce].Quaternary(); v != 0 {
|
||||
i.pce++
|
||||
return v
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func appendPrimary(key []byte, p int) []byte {
|
||||
// Convert to variable length encoding; supports up to 23 bits.
|
||||
if p <= 0x7FFF {
|
||||
key = append(key, uint8(p>>8), uint8(p))
|
||||
} else {
|
||||
key = append(key, uint8(p>>16)|0x80, uint8(p>>8), uint8(p))
|
||||
}
|
||||
return key
|
||||
}
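Two worked examples of the variable-length form above (illustrative input values): a primary at or below 0x7FFF takes two bytes; a larger primary takes three, with the top bit of the first byte set.

// appendPrimary(nil, 0x0041)   -> []byte{0x00, 0x41}
// appendPrimary(nil, 0x012345) -> []byte{0x81, 0x23, 0x45}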
|
||||
|
||||
// keyFromElems converts the weights ws to a compact sequence of bytes.
|
||||
// The result will be appended to the byte buffer in buf.
|
||||
func (c *Collator) keyFromElems(buf *Buffer, ws []colltab.Elem) {
|
||||
for _, v := range ws {
|
||||
if w := v.Primary(); w > 0 {
|
||||
buf.key = appendPrimary(buf.key, w)
|
||||
}
|
||||
}
|
||||
if !c.ignore[colltab.Secondary] {
|
||||
buf.key = append(buf.key, 0, 0)
|
||||
// TODO: we can use one 0 if we can guarantee that all non-zero weights are > 0xFF.
|
||||
if !c.backwards {
|
||||
for _, v := range ws {
|
||||
if w := v.Secondary(); w > 0 {
|
||||
buf.key = append(buf.key, uint8(w>>8), uint8(w))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := len(ws) - 1; i >= 0; i-- {
|
||||
if w := ws[i].Secondary(); w > 0 {
|
||||
buf.key = append(buf.key, uint8(w>>8), uint8(w))
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if c.caseLevel {
|
||||
buf.key = append(buf.key, 0, 0)
|
||||
}
|
||||
if !c.ignore[colltab.Tertiary] || c.caseLevel {
|
||||
buf.key = append(buf.key, 0, 0)
|
||||
for _, v := range ws {
|
||||
if w := v.Tertiary(); w > 0 {
|
||||
buf.key = append(buf.key, uint8(w))
|
||||
}
|
||||
}
|
||||
// Derive the quaternary weights from the options and other levels.
|
||||
// Note that we represent MaxQuaternary as 0xFF. The first byte of the
|
||||
// representation of a primary weight is always smaller than 0xFF,
|
||||
// so using this single byte value will compare correctly.
|
||||
if !c.ignore[colltab.Quaternary] && c.alternate >= altShifted {
|
||||
if c.alternate == altShiftTrimmed {
|
||||
lastNonFFFF := len(buf.key)
|
||||
buf.key = append(buf.key, 0)
|
||||
for _, v := range ws {
|
||||
if w := v.Quaternary(); w == colltab.MaxQuaternary {
|
||||
buf.key = append(buf.key, 0xFF)
|
||||
} else if w > 0 {
|
||||
buf.key = appendPrimary(buf.key, w)
|
||||
lastNonFFFF = len(buf.key)
|
||||
}
|
||||
}
|
||||
buf.key = buf.key[:lastNonFFFF]
|
||||
} else {
|
||||
buf.key = append(buf.key, 0)
|
||||
for _, v := range ws {
|
||||
if w := v.Quaternary(); w == colltab.MaxQuaternary {
|
||||
buf.key = append(buf.key, 0xFF)
|
||||
} else if w > 0 {
|
||||
buf.key = appendPrimary(buf.key, w)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func processWeights(vw alternateHandling, top uint32, wa []colltab.Elem) {
|
||||
ignore := false
|
||||
vtop := int(top)
|
||||
switch vw {
|
||||
case altShifted, altShiftTrimmed:
|
||||
for i := range wa {
|
||||
if p := wa[i].Primary(); p <= vtop && p != 0 {
|
||||
wa[i] = colltab.MakeQuaternary(p)
|
||||
ignore = true
|
||||
} else if p == 0 {
|
||||
if ignore {
|
||||
wa[i] = colltab.Ignore
|
||||
}
|
||||
} else {
|
||||
ignore = false
|
||||
}
|
||||
}
|
||||
case altBlanked:
|
||||
for i := range wa {
|
||||
if p := wa[i].Primary(); p <= vtop && (ignore || p != 0) {
|
||||
wa[i] = colltab.Ignore
|
||||
ignore = true
|
||||
} else {
|
||||
ignore = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
32
vendor/golang.org/x/text/collate/index.go
generated
vendored
@ -1,32 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package collate
|
||||
|
||||
import "golang.org/x/text/internal/colltab"
|
||||
|
||||
const blockSize = 64
|
||||
|
||||
func getTable(t tableIndex) *colltab.Table {
|
||||
return &colltab.Table{
|
||||
Index: colltab.Trie{
|
||||
Index0: mainLookup[:][blockSize*t.lookupOffset:],
|
||||
Values0: mainValues[:][blockSize*t.valuesOffset:],
|
||||
Index: mainLookup[:],
|
||||
Values: mainValues[:],
|
||||
},
|
||||
ExpandElem: mainExpandElem[:],
|
||||
ContractTries: colltab.ContractTrieSet(mainCTEntries[:]),
|
||||
ContractElem: mainContractElem[:],
|
||||
MaxContractLen: 18,
|
||||
VariableTop: varTop,
|
||||
}
|
||||
}
|
||||
|
||||
// tableIndex holds information for constructing a table
|
||||
// for a certain locale based on the main table.
|
||||
type tableIndex struct {
|
||||
lookupOffset uint32
|
||||
valuesOffset uint32
|
||||
}
|
553
vendor/golang.org/x/text/collate/maketables.go
generated
vendored
@ -1,553 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Collation table generator.
|
||||
// Data read from the web.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/collate"
|
||||
"golang.org/x/text/collate/build"
|
||||
"golang.org/x/text/internal/colltab"
|
||||
"golang.org/x/text/internal/gen"
|
||||
"golang.org/x/text/language"
|
||||
"golang.org/x/text/unicode/cldr"
|
||||
)
|
||||
|
||||
var (
|
||||
test = flag.Bool("test", false,
|
||||
"test existing tables; can be used to compare web data with package data.")
|
||||
short = flag.Bool("short", false, `Use "short" alternatives, when available.`)
|
||||
draft = flag.Bool("draft", false, `Use draft versions, when available.`)
|
||||
tags = flag.String("tags", "", "build tags to be included after +build directive")
|
||||
pkg = flag.String("package", "collate",
|
||||
"the name of the package in which the generated file is to be included")
|
||||
|
||||
tables = flagStringSetAllowAll("tables", "collate", "collate,chars",
|
||||
"comma-spearated list of tables to generate.")
|
||||
exclude = flagStringSet("exclude", "zh2", "",
|
||||
"comma-separated list of languages to exclude.")
|
||||
include = flagStringSet("include", "", "",
|
||||
"comma-separated list of languages to include. Include trumps exclude.")
|
||||
// TODO: Not included: unihan gb2312han zhuyin big5han (for size reasons)
|
||||
// TODO: Not included: traditional (buggy for Bengali)
|
||||
types = flagStringSetAllowAll("types", "standard,phonebook,phonetic,reformed,pinyin,stroke", "",
|
||||
"comma-separated list of types that should be included.")
|
||||
)
|
||||
|
||||
// stringSet implements an ordered set based on a list. It implements flag.Value
|
||||
// to allow a set to be specified as a comma-separated list.
|
||||
type stringSet struct {
|
||||
s []string
|
||||
allowed *stringSet
|
||||
dirty bool // needs compaction if true
|
||||
all bool
|
||||
allowAll bool
|
||||
}
|
||||
|
||||
func flagStringSet(name, def, allowed, usage string) *stringSet {
|
||||
ss := &stringSet{}
|
||||
if allowed != "" {
|
||||
usage += fmt.Sprintf(" (allowed values: any of %s)", allowed)
|
||||
ss.allowed = &stringSet{}
|
||||
failOnError(ss.allowed.Set(allowed))
|
||||
}
|
||||
ss.Set(def)
|
||||
flag.Var(ss, name, usage)
|
||||
return ss
|
||||
}
|
||||
|
||||
func flagStringSetAllowAll(name, def, allowed, usage string) *stringSet {
|
||||
ss := &stringSet{allowAll: true}
|
||||
if allowed == "" {
|
||||
flag.Var(ss, name, usage+fmt.Sprintf(` Use "all" to select all.`))
|
||||
} else {
|
||||
ss.allowed = &stringSet{}
|
||||
failOnError(ss.allowed.Set(allowed))
|
||||
flag.Var(ss, name, usage+fmt.Sprintf(` (allowed values: "all" or any of %s)`, allowed))
|
||||
}
|
||||
ss.Set(def)
|
||||
return ss
|
||||
}
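Taken together, these helpers wire comma-separated sets into the flag package, so the generator can be invoked with, for instance, a command line along these lines (illustrative, not taken from this commit):

go run maketables.go -tables=collate,chars -exclude=zh2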
|
||||
|
||||
func (ss stringSet) Len() int {
|
||||
return len(ss.s)
|
||||
}
|
||||
|
||||
func (ss stringSet) String() string {
|
||||
return strings.Join(ss.s, ",")
|
||||
}
|
||||
|
||||
func (ss *stringSet) Set(s string) error {
|
||||
if ss.allowAll && s == "all" {
|
||||
ss.s = nil
|
||||
ss.all = true
|
||||
return nil
|
||||
}
|
||||
ss.s = ss.s[:0]
|
||||
for _, s := range strings.Split(s, ",") {
|
||||
if s := strings.TrimSpace(s); s != "" {
|
||||
if ss.allowed != nil && !ss.allowed.contains(s) {
|
||||
return fmt.Errorf("unsupported value %q; must be one of %s", s, ss.allowed)
|
||||
}
|
||||
ss.add(s)
|
||||
}
|
||||
}
|
||||
ss.compact()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *stringSet) add(s string) {
|
||||
ss.s = append(ss.s, s)
|
||||
ss.dirty = true
|
||||
}
|
||||
|
||||
func (ss *stringSet) values() []string {
|
||||
ss.compact()
|
||||
return ss.s
|
||||
}
|
||||
|
||||
func (ss *stringSet) contains(s string) bool {
|
||||
if ss.all {
|
||||
return true
|
||||
}
|
||||
for _, v := range ss.s {
|
||||
if v == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (ss *stringSet) compact() {
|
||||
if !ss.dirty {
|
||||
return
|
||||
}
|
||||
a := ss.s
|
||||
sort.Strings(a)
|
||||
k := 0
|
||||
for i := 1; i < len(a); i++ {
|
||||
if a[k] != a[i] {
|
||||
a[k+1] = a[i]
|
||||
k++
|
||||
}
|
||||
}
|
||||
ss.s = a[:k+1]
|
||||
ss.dirty = false
|
||||
}
|
||||
|
||||
func skipLang(l string) bool {
|
||||
if include.Len() > 0 {
|
||||
return !include.contains(l)
|
||||
}
|
||||
return exclude.contains(l)
|
||||
}
|
||||
|
||||
// altInclude returns a list of alternatives (for the LDML alt attribute)
|
||||
// in order of preference. An empty string in this list indicates the
|
||||
// default entry.
|
||||
func altInclude() []string {
|
||||
l := []string{}
|
||||
if *short {
|
||||
l = append(l, "short")
|
||||
}
|
||||
l = append(l, "")
|
||||
// TODO: handle draft using cldr.SetDraftLevel
|
||||
if *draft {
|
||||
l = append(l, "proposed")
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func failOnError(e error) {
|
||||
if e != nil {
|
||||
log.Panic(e)
|
||||
}
|
||||
}
|
||||
|
||||
func openArchive() *zip.Reader {
|
||||
f := gen.OpenCLDRCoreZip()
|
||||
buffer, err := ioutil.ReadAll(f)
|
||||
f.Close()
|
||||
failOnError(err)
|
||||
archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
|
||||
failOnError(err)
|
||||
return archive
|
||||
}
|
||||
|
||||
// parseUCA parses a Default Unicode Collation Element Table of the format
|
||||
// specified in http://www.unicode.org/reports/tr10/#File_Format.
|
||||
// It adds the parsed entries to the given builder.
|
||||
func parseUCA(builder *build.Builder) {
|
||||
var r io.ReadCloser
|
||||
var err error
|
||||
for _, f := range openArchive().File {
|
||||
if strings.HasSuffix(f.Name, "allkeys_CLDR.txt") {
|
||||
r, err = f.Open()
|
||||
}
|
||||
}
|
||||
if r == nil {
|
||||
log.Fatal("File allkeys_CLDR.txt not found in archive.")
|
||||
}
|
||||
failOnError(err)
|
||||
defer r.Close()
|
||||
scanner := bufio.NewScanner(r)
|
||||
colelem := regexp.MustCompile(`\[([.*])([0-9A-F.]+)\]`)
|
||||
for i := 1; scanner.Scan(); i++ {
|
||||
line := scanner.Text()
|
||||
if len(line) == 0 || line[0] == '#' {
|
||||
continue
|
||||
}
|
||||
if line[0] == '@' {
|
||||
// parse properties
|
||||
switch {
|
||||
case strings.HasPrefix(line[1:], "version "):
|
||||
a := strings.Split(line[1:], " ")
|
||||
if a[1] != gen.UnicodeVersion() {
|
||||
log.Fatalf("incompatible version %s; want %s", a[1], gen.UnicodeVersion())
|
||||
}
|
||||
case strings.HasPrefix(line[1:], "backwards "):
|
||||
log.Fatalf("%d: unsupported option backwards", i)
|
||||
default:
|
||||
log.Printf("%d: unknown option %s", i, line[1:])
|
||||
}
|
||||
} else {
|
||||
// parse entries
|
||||
part := strings.Split(line, " ; ")
|
||||
if len(part) != 2 {
|
||||
log.Fatalf("%d: production rule without ';': %v", i, line)
|
||||
}
|
||||
lhs := []rune{}
|
||||
for _, v := range strings.Split(part[0], " ") {
|
||||
if v == "" {
|
||||
continue
|
||||
}
|
||||
lhs = append(lhs, rune(convHex(i, v)))
|
||||
}
|
||||
var n int
|
||||
var vars []int
|
||||
rhs := [][]int{}
|
||||
for i, m := range colelem.FindAllStringSubmatch(part[1], -1) {
|
||||
n += len(m[0])
|
||||
elem := []int{}
|
||||
for _, h := range strings.Split(m[2], ".") {
|
||||
elem = append(elem, convHex(i, h))
|
||||
}
|
||||
if m[1] == "*" {
|
||||
vars = append(vars, i)
|
||||
}
|
||||
rhs = append(rhs, elem)
|
||||
}
|
||||
if len(part[1]) < n+3 || part[1][n+1] != '#' {
|
||||
log.Fatalf("%d: expected comment; found %s", i, part[1][n:])
|
||||
}
|
||||
if *test {
|
||||
testInput.add(string(lhs))
|
||||
}
|
||||
failOnError(builder.Add(lhs, rhs, vars))
|
||||
}
|
||||
}
|
||||
if scanner.Err() != nil {
|
||||
log.Fatal(scanner.Err())
|
||||
}
|
||||
}
|
||||
|
||||
func convHex(line int, s string) int {
|
||||
r, e := strconv.ParseInt(s, 16, 32)
|
||||
if e != nil {
|
||||
log.Fatalf("%d: %v", line, e)
|
||||
}
|
||||
return int(r)
|
||||
}
|
||||
|
||||
var testInput = stringSet{}
|
||||
|
||||
var charRe = regexp.MustCompile(`&#x([0-9A-F]*);`)
|
||||
var tagRe = regexp.MustCompile(`<([a-z_]*) */>`)
|
||||
|
||||
var mainLocales = []string{}
|
||||
|
||||
// charSets holds a list of exemplar characters per category.
|
||||
type charSets map[string][]string
|
||||
|
||||
func (p charSets) fprint(w io.Writer) {
|
||||
fmt.Fprintln(w, "[exN]string{")
|
||||
for i, k := range []string{"", "contractions", "punctuation", "auxiliary", "currencySymbol", "index"} {
|
||||
if set := p[k]; len(set) != 0 {
|
||||
fmt.Fprintf(w, "\t\t%d: %q,\n", i, strings.Join(set, " "))
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(w, "\t},")
|
||||
}
|
||||
|
||||
var localeChars = make(map[string]charSets)
|
||||
|
||||
const exemplarHeader = `
|
||||
type exemplarType int
|
||||
const (
|
||||
exCharacters exemplarType = iota
|
||||
exContractions
|
||||
exPunctuation
|
||||
exAuxiliary
|
||||
exCurrency
|
||||
exIndex
|
||||
exN
|
||||
)
|
||||
`
|
||||
|
||||
func printExemplarCharacters(w io.Writer) {
|
||||
fmt.Fprintln(w, exemplarHeader)
|
||||
fmt.Fprintln(w, "var exemplarCharacters = map[string][exN]string{")
|
||||
for _, loc := range mainLocales {
|
||||
fmt.Fprintf(w, "\t%q: ", loc)
|
||||
localeChars[loc].fprint(w)
|
||||
}
|
||||
fmt.Fprintln(w, "}")
|
||||
}
|
||||
|
||||
func decodeCLDR(d *cldr.Decoder) *cldr.CLDR {
|
||||
r := gen.OpenCLDRCoreZip()
|
||||
data, err := d.DecodeZip(r)
|
||||
failOnError(err)
|
||||
return data
|
||||
}
|
||||
|
||||
// parseMain parses XML files in the main directory of the CLDR core.zip file.
|
||||
func parseMain() {
|
||||
d := &cldr.Decoder{}
|
||||
d.SetDirFilter("main")
|
||||
d.SetSectionFilter("characters")
|
||||
data := decodeCLDR(d)
|
||||
for _, loc := range data.Locales() {
|
||||
x := data.RawLDML(loc)
|
||||
if skipLang(x.Identity.Language.Type) {
|
||||
continue
|
||||
}
|
||||
if x.Characters != nil {
|
||||
x, _ = data.LDML(loc)
|
||||
loc = language.Make(loc).String()
|
||||
for _, ec := range x.Characters.ExemplarCharacters {
|
||||
if ec.Draft != "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := localeChars[loc]; !ok {
|
||||
mainLocales = append(mainLocales, loc)
|
||||
localeChars[loc] = make(charSets)
|
||||
}
|
||||
localeChars[loc][ec.Type] = parseCharacters(ec.Data())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func parseCharacters(chars string) []string {
|
||||
parseSingle := func(s string) (r rune, tail string, escaped bool) {
|
||||
if s[0] == '\\' {
|
||||
return rune(s[1]), s[2:], true
|
||||
}
|
||||
r, sz := utf8.DecodeRuneInString(s)
|
||||
return r, s[sz:], false
|
||||
}
|
||||
chars = strings.TrimSpace(chars)
|
||||
if n := len(chars) - 1; chars[n] == ']' && chars[0] == '[' {
|
||||
chars = chars[1:n]
|
||||
}
|
||||
list := []string{}
|
||||
var r, last, end rune
|
||||
for len(chars) > 0 {
|
||||
if chars[0] == '{' { // character sequence
|
||||
buf := []rune{}
|
||||
for chars = chars[1:]; len(chars) > 0; {
|
||||
r, chars, _ = parseSingle(chars)
|
||||
if r == '}' {
|
||||
break
|
||||
}
|
||||
if r == ' ' {
|
||||
log.Fatalf("space not supported in sequence %q", chars)
|
||||
}
|
||||
buf = append(buf, r)
|
||||
}
|
||||
list = append(list, string(buf))
|
||||
last = 0
|
||||
} else { // single character
|
||||
escaped := false
|
||||
r, chars, escaped = parseSingle(chars)
|
||||
if r != ' ' {
|
||||
if r == '-' && !escaped {
|
||||
if last == 0 {
|
||||
log.Fatal("'-' should be preceded by a character")
|
||||
}
|
||||
end, chars, _ = parseSingle(chars)
|
||||
for ; last <= end; last++ {
|
||||
list = append(list, string(last))
|
||||
}
|
||||
last = 0
|
||||
} else {
|
||||
list = append(list, string(r))
|
||||
last = r
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return list
|
||||
}
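As a hedged illustration of the exemplar syntax handled above (not a call that appears in this file): single characters and braced sequences each become separate list entries.

// parseCharacters(`[x y {ch}]`) -> []string{"x", "y", "ch"}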
|
||||
|
||||
var fileRe = regexp.MustCompile(`.*/collation/(.*)\.xml`)
|
||||
|
||||
// typeMap translates legacy type keys to their BCP47 equivalent.
|
||||
var typeMap = map[string]string{
|
||||
"phonebook": "phonebk",
|
||||
"traditional": "trad",
|
||||
}
|
||||
|
||||
// parseCollation parses XML files in the collation directory of the CLDR core.zip file.
|
||||
func parseCollation(b *build.Builder) {
|
||||
d := &cldr.Decoder{}
|
||||
d.SetDirFilter("collation")
|
||||
data := decodeCLDR(d)
|
||||
for _, loc := range data.Locales() {
|
||||
x, err := data.LDML(loc)
|
||||
failOnError(err)
|
||||
if skipLang(x.Identity.Language.Type) {
|
||||
continue
|
||||
}
|
||||
cs := x.Collations.Collation
|
||||
sl := cldr.MakeSlice(&cs)
|
||||
if len(types.s) == 0 {
|
||||
sl.SelectAnyOf("type", x.Collations.Default())
|
||||
} else if !types.all {
|
||||
sl.SelectAnyOf("type", types.s...)
|
||||
}
|
||||
sl.SelectOnePerGroup("alt", altInclude())
|
||||
|
||||
for _, c := range cs {
|
||||
id, err := language.Parse(loc)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "invalid locale: %q", err)
|
||||
continue
|
||||
}
|
||||
// Support both old- and new-style defaults.
|
||||
d := c.Type
|
||||
if x.Collations.DefaultCollation == nil {
|
||||
d = x.Collations.Default()
|
||||
} else {
|
||||
d = x.Collations.DefaultCollation.Data()
|
||||
}
|
||||
// We assume tables are being built either for search or collation,
|
||||
// but not both. For search the default is always "search".
|
||||
if d != c.Type && c.Type != "search" {
|
||||
typ := c.Type
|
||||
if len(c.Type) > 8 {
|
||||
typ = typeMap[c.Type]
|
||||
}
|
||||
id, err = id.SetTypeForKey("co", typ)
|
||||
failOnError(err)
|
||||
}
|
||||
t := b.Tailoring(id)
|
||||
c.Process(processor{t})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type processor struct {
|
||||
t *build.Tailoring
|
||||
}
|
||||
|
||||
func (p processor) Reset(anchor string, before int) (err error) {
|
||||
if before != 0 {
|
||||
err = p.t.SetAnchorBefore(anchor)
|
||||
} else {
|
||||
err = p.t.SetAnchor(anchor)
|
||||
}
|
||||
failOnError(err)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p processor) Insert(level int, str, context, extend string) error {
|
||||
str = context + str
|
||||
if *test {
|
||||
testInput.add(str)
|
||||
}
|
||||
// TODO: mimic bug in old maketables: remove.
|
||||
err := p.t.Insert(colltab.Level(level-1), str, context+extend)
|
||||
failOnError(err)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p processor) Index(id string) {
|
||||
}
|
||||
|
||||
func testCollator(c *collate.Collator) {
|
||||
c0 := collate.New(language.Und)
|
||||
|
||||
// Iterate over all characters for all locales and check
|
||||
// whether Key is equal.
|
||||
buf := collate.Buffer{}
|
||||
|
||||
// Add all common and not too uncommon runes to the test set.
|
||||
for i := rune(0); i < 0x30000; i++ {
|
||||
testInput.add(string(i))
|
||||
}
|
||||
for i := rune(0xE0000); i < 0xF0000; i++ {
|
||||
testInput.add(string(i))
|
||||
}
|
||||
for _, str := range testInput.values() {
|
||||
k0 := c0.KeyFromString(&buf, str)
|
||||
k := c.KeyFromString(&buf, str)
|
||||
if !bytes.Equal(k0, k) {
|
||||
failOnError(fmt.Errorf("test:%U: keys differ (%x vs %x)", []rune(str), k0, k))
|
||||
}
|
||||
buf.Reset()
|
||||
}
|
||||
fmt.Println("PASS")
|
||||
}
|
||||
|
||||
func main() {
|
||||
gen.Init()
|
||||
b := build.NewBuilder()
|
||||
parseUCA(b)
|
||||
if tables.contains("chars") {
|
||||
parseMain()
|
||||
}
|
||||
parseCollation(b)
|
||||
|
||||
c, err := b.Build()
|
||||
failOnError(err)
|
||||
|
||||
if *test {
|
||||
testCollator(collate.NewFromTable(c))
|
||||
} else {
|
||||
w := &bytes.Buffer{}
|
||||
|
||||
gen.WriteUnicodeVersion(w)
|
||||
gen.WriteCLDRVersion(w)
|
||||
|
||||
if tables.contains("collate") {
|
||||
_, err = b.Print(w)
|
||||
failOnError(err)
|
||||
}
|
||||
if tables.contains("chars") {
|
||||
printExemplarCharacters(w)
|
||||
}
|
||||
gen.WriteGoFile("tables.go", *pkg, w.Bytes())
|
||||
}
|
||||
}
|
239
vendor/golang.org/x/text/collate/option.go
generated
vendored
@ -1,239 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package collate
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"golang.org/x/text/internal/colltab"
|
||||
"golang.org/x/text/language"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// newCollator creates a new collator with default options configured.
|
||||
func newCollator(t colltab.Weighter) *Collator {
|
||||
// Initialize a collator with default options.
|
||||
c := &Collator{
|
||||
options: options{
|
||||
ignore: [colltab.NumLevels]bool{
|
||||
colltab.Quaternary: true,
|
||||
colltab.Identity: true,
|
||||
},
|
||||
f: norm.NFD,
|
||||
t: t,
|
||||
},
|
||||
}
|
||||
|
||||
// TODO: store vt in tags or remove.
|
||||
c.variableTop = t.Top()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// An Option is used to change the behavior of a Collator. Options override the
|
||||
// settings passed through the locale identifier.
|
||||
type Option struct {
|
||||
priority int
|
||||
f func(o *options)
|
||||
}
|
||||
|
||||
type prioritizedOptions []Option
|
||||
|
||||
func (p prioritizedOptions) Len() int {
|
||||
return len(p)
|
||||
}
|
||||
|
||||
func (p prioritizedOptions) Swap(i, j int) {
|
||||
p[i], p[j] = p[j], p[i]
|
||||
}
|
||||
|
||||
func (p prioritizedOptions) Less(i, j int) bool {
|
||||
return p[i].priority < p[j].priority
|
||||
}
|
||||
|
||||
type options struct {
|
||||
// ignore specifies which levels to ignore.
|
||||
ignore [colltab.NumLevels]bool
|
||||
|
||||
// caseLevel is true if there is an additional level of case matching
|
||||
// between the secondary and tertiary levels.
|
||||
caseLevel bool
|
||||
|
||||
// backwards specifies the order of sorting at the secondary level.
|
||||
// This option exists predominantly to support reverse sorting of accents in French.
|
||||
backwards bool
|
||||
|
||||
// numeric specifies whether any sequence of decimal digits (category is Nd)
|
||||
// is sorted at a primary level with its numeric value.
|
||||
// For example, "A-21" < "A-123".
|
||||
// This option is set by wrapping the main Weighter with NewNumericWeighter.
|
||||
numeric bool
|
||||
|
||||
// alternate specifies an alternative handling of variables.
|
||||
alternate alternateHandling
|
||||
|
||||
// variableTop is the largest primary value that is considered to be
|
||||
// variable.
|
||||
variableTop uint32
|
||||
|
||||
t colltab.Weighter
|
||||
|
||||
f norm.Form
|
||||
}
|
||||
|
||||
func (o *options) setOptions(opts []Option) {
|
||||
sort.Sort(prioritizedOptions(opts))
|
||||
for _, x := range opts {
|
||||
x.f(o)
|
||||
}
|
||||
}
|
||||
|
||||
// OptionsFromTag extracts the BCP47 collation options from the tag and
|
||||
// configures a collator accordingly. These options are set before any other
|
||||
// option.
|
||||
func OptionsFromTag(t language.Tag) Option {
|
||||
return Option{0, func(o *options) {
|
||||
o.setFromTag(t)
|
||||
}}
|
||||
}
|
||||
|
||||
func (o *options) setFromTag(t language.Tag) {
|
||||
o.caseLevel = ldmlBool(t, o.caseLevel, "kc")
|
||||
o.backwards = ldmlBool(t, o.backwards, "kb")
|
||||
o.numeric = ldmlBool(t, o.numeric, "kn")
|
||||
|
||||
// Extract settings from the BCP47 u extension.
|
||||
switch t.TypeForKey("ks") { // strength
|
||||
case "level1":
|
||||
o.ignore[colltab.Secondary] = true
|
||||
o.ignore[colltab.Tertiary] = true
|
||||
case "level2":
|
||||
o.ignore[colltab.Tertiary] = true
|
||||
case "level3", "":
|
||||
// The default.
|
||||
case "level4":
|
||||
o.ignore[colltab.Quaternary] = false
|
||||
case "identic":
|
||||
o.ignore[colltab.Quaternary] = false
|
||||
o.ignore[colltab.Identity] = false
|
||||
}
|
||||
|
||||
switch t.TypeForKey("ka") {
|
||||
case "shifted":
|
||||
o.alternate = altShifted
|
||||
// The following two types are not official BCP47, but we support them to
|
||||
// give access to this otherwise hidden functionality. The name blanked is
|
||||
// derived from the LDML name blanked and posix reflects the main use of
|
||||
// the shift-trimmed option.
|
||||
case "blanked":
|
||||
o.alternate = altBlanked
|
||||
case "posix":
|
||||
o.alternate = altShiftTrimmed
|
||||
}
|
||||
|
||||
// TODO: caseFirst ("kf"), reorder ("kr"), and maybe variableTop ("vt").
|
||||
|
||||
// Not used:
|
||||
// - normalization ("kk", not necessary for this implementation)
|
||||
// - hiraganaQuaternary ("kh", obsolete)
|
||||
}
|
||||
|
||||
func ldmlBool(t language.Tag, old bool, key string) bool {
|
||||
switch t.TypeForKey(key) {
|
||||
case "true":
|
||||
return true
|
||||
case "false":
|
||||
return false
|
||||
default:
|
||||
return old
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// IgnoreCase sets case-insensitive comparison.
|
||||
IgnoreCase Option = ignoreCase
|
||||
ignoreCase = Option{3, ignoreCaseF}
|
||||
|
||||
// IgnoreDiacritics causes diacritical marks to be ignored. ("o" == "ö").
|
||||
IgnoreDiacritics Option = ignoreDiacritics
|
||||
ignoreDiacritics = Option{3, ignoreDiacriticsF}
|
||||
|
||||
// IgnoreWidth causes full-width characters to match their half-width
|
||||
// equivalents.
|
||||
IgnoreWidth Option = ignoreWidth
|
||||
ignoreWidth = Option{2, ignoreWidthF}
|
||||
|
||||
// Loose sets the collator to ignore diacritics, case and weight.
|
||||
Loose Option = loose
|
||||
loose = Option{4, looseF}
|
||||
|
||||
// Force ordering if strings are equivalent but not equal.
|
||||
Force Option = force
|
||||
force = Option{5, forceF}
|
||||
|
||||
// Numeric specifies that numbers should sort numerically ("2" < "12").
|
||||
Numeric Option = numeric
|
||||
numeric = Option{5, numericF}
|
||||
)
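A minimal sketch of combining these exported options with a locale (assuming the standard import paths; per setOptions above, explicit options override settings derived from the tag):

package main

import (
	"fmt"

	"golang.org/x/text/collate"
	"golang.org/x/text/language"
)

func main() {
	c := collate.New(language.English, collate.IgnoreCase, collate.Numeric)
	fmt.Println(c.CompareString("file2", "FILE10")) // -1: "file" == "FILE" and 2 < 10
}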
|
||||
|
||||
func ignoreWidthF(o *options) {
|
||||
o.ignore[colltab.Tertiary] = true
|
||||
o.caseLevel = true
|
||||
}
|
||||
|
||||
func ignoreDiacriticsF(o *options) {
|
||||
o.ignore[colltab.Secondary] = true
|
||||
}
|
||||
|
||||
func ignoreCaseF(o *options) {
|
||||
o.ignore[colltab.Tertiary] = true
|
||||
o.caseLevel = false
|
||||
}
|
||||
|
||||
func looseF(o *options) {
|
||||
ignoreWidthF(o)
|
||||
ignoreDiacriticsF(o)
|
||||
ignoreCaseF(o)
|
||||
}
|
||||
|
||||
func forceF(o *options) {
|
||||
o.ignore[colltab.Identity] = false
|
||||
}
|
||||
|
||||
func numericF(o *options) { o.numeric = true }
|
||||
|
||||
// Reorder overrides the pre-defined ordering of scripts and character sets.
|
||||
func Reorder(s ...string) Option {
|
||||
// TODO: need fractional weights to implement this.
|
||||
panic("TODO: implement")
|
||||
}
|
||||
|
||||
// TODO: consider making these public again. These options cannot be fully
|
||||
// specified in BCP47, so an API interface seems warranted. Still a higher-level
|
||||
// interface would be nice (e.g. a POSIX option for enabling altShiftTrimmed)
|
||||
|
||||
// alternateHandling identifies the various ways in which variables are handled.
|
||||
// A rune with a primary weight lower than the variable top is considered a
|
||||
// variable.
|
||||
// See http://www.unicode.org/reports/tr10/#Variable_Weighting for details.
|
||||
type alternateHandling int
|
||||
|
||||
const (
|
||||
// altNonIgnorable turns off special handling of variables.
|
||||
altNonIgnorable alternateHandling = iota
|
||||
|
||||
// altBlanked sets variables and all subsequent primary ignorables to be
|
||||
// ignorable at all levels. This is identical to removing all variables
|
||||
// and subsequent primary ignorables from the input.
|
||||
altBlanked
|
||||
|
||||
// altShifted sets variables to be ignorable for levels one through three and
|
||||
// adds a fourth level based on the values of the ignored levels.
|
||||
altShifted
|
||||
|
||||
// altShiftTrimmed is a slight variant of altShifted that is used to
|
||||
// emulate POSIX.
|
||||
altShiftTrimmed
|
||||
)
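These handlings are not exported directly; as setFromTag above shows, they can be selected through the BCP 47 "ka" key. A hedged sketch:

// Shifted handling: variables (spaces, punctuation) are compared at the
// quaternary level instead of the primary level.
c := collate.New(language.MustParse("en-u-ka-shifted"))
_ = c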
|
81
vendor/golang.org/x/text/collate/sort.go
generated
vendored
@ -1,81 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package collate
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
)
|
||||
|
||||
const (
|
||||
maxSortBuffer = 40960
|
||||
maxSortEntries = 4096
|
||||
)
|
||||
|
||||
type swapper interface {
|
||||
Swap(i, j int)
|
||||
}
|
||||
|
||||
type sorter struct {
|
||||
buf *Buffer
|
||||
keys [][]byte
|
||||
src swapper
|
||||
}
|
||||
|
||||
func (s *sorter) init(n int) {
|
||||
if s.buf == nil {
|
||||
s.buf = &Buffer{}
|
||||
s.buf.init()
|
||||
}
|
||||
if cap(s.keys) < n {
|
||||
s.keys = make([][]byte, n)
|
||||
}
|
||||
s.keys = s.keys[0:n]
|
||||
}
|
||||
|
||||
func (s *sorter) sort(src swapper) {
|
||||
s.src = src
|
||||
sort.Sort(s)
|
||||
}
|
||||
|
||||
func (s sorter) Len() int {
|
||||
return len(s.keys)
|
||||
}
|
||||
|
||||
func (s sorter) Less(i, j int) bool {
|
||||
return bytes.Compare(s.keys[i], s.keys[j]) == -1
|
||||
}
|
||||
|
||||
func (s sorter) Swap(i, j int) {
|
||||
s.keys[i], s.keys[j] = s.keys[j], s.keys[i]
|
||||
s.src.Swap(i, j)
|
||||
}
|
||||
|
||||
// A Lister can be sorted by Collator's Sort method.
|
||||
type Lister interface {
|
||||
Len() int
|
||||
Swap(i, j int)
|
||||
// Bytes returns the bytes of the text at index i.
|
||||
Bytes(i int) []byte
|
||||
}
|
||||
|
||||
// Sort uses sort.Sort to sort the strings represented by x using the rules of c.
|
||||
func (c *Collator) Sort(x Lister) {
|
||||
n := x.Len()
|
||||
c.sorter.init(n)
|
||||
for i := 0; i < n; i++ {
|
||||
c.sorter.keys[i] = c.Key(c.sorter.buf, x.Bytes(i))
|
||||
}
|
||||
c.sorter.sort(x)
|
||||
}
|
||||
|
||||
// SortStrings uses sort.Sort to sort the strings in x using the rules of c.
|
||||
func (c *Collator) SortStrings(x []string) {
|
||||
c.sorter.init(len(x))
|
||||
for i, s := range x {
|
||||
c.sorter.keys[i] = c.KeyFromString(c.sorter.buf, s)
|
||||
}
|
||||
c.sorter.sort(sort.StringSlice(x))
|
||||
}
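A short usage sketch for the sorting entry points above (assuming the standard import paths):

package main

import (
	"fmt"

	"golang.org/x/text/collate"
	"golang.org/x/text/language"
)

func main() {
	s := []string{"Zürich", "Zurich", "Aaron"}
	collate.New(language.German).SortStrings(s)
	fmt.Println(s) // ordered by German collation rules; "Aaron" sorts first
}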
|
73789
vendor/golang.org/x/text/collate/tables.go
generated
vendored
File diff suppressed because it is too large
371
vendor/golang.org/x/text/internal/colltab/collelem.go
generated
vendored
@ -1,371 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package colltab
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// Level identifies the collation comparison level.
|
||||
// The primary level corresponds to the basic sorting of text.
|
||||
// The secondary level corresponds to accents and related linguistic elements.
|
||||
// The tertiary level corresponds to casing and related concepts.
|
||||
// The quaternary level is derived from the other levels by the
|
||||
// various algorithms for handling variable elements.
|
||||
type Level int
|
||||
|
||||
const (
|
||||
Primary Level = iota
|
||||
Secondary
|
||||
Tertiary
|
||||
Quaternary
|
||||
Identity
|
||||
|
||||
NumLevels
|
||||
)
|
||||
|
||||
const (
|
||||
defaultSecondary = 0x20
|
||||
defaultTertiary = 0x2
|
||||
maxTertiary = 0x1F
|
||||
MaxQuaternary = 0x1FFFFF // 21 bits.
|
||||
)
|
||||
|
||||
// Elem is a representation of a collation element. This API provides ways to encode
|
||||
// and decode Elems. Implementations of collation tables may use values greater than
|
||||
// or equal to PrivateUse for their own purposes. However, these should never be
|
||||
// returned by AppendNext.
|
||||
type Elem uint32
|
||||
|
||||
const (
|
||||
maxCE Elem = 0xAFFFFFFF
|
||||
PrivateUse = minContract
|
||||
minContract = 0xC0000000
|
||||
maxContract = 0xDFFFFFFF
|
||||
minExpand = 0xE0000000
|
||||
maxExpand = 0xEFFFFFFF
|
||||
minDecomp = 0xF0000000
|
||||
)
|
||||
|
||||
type ceType int
|
||||
|
||||
const (
|
||||
ceNormal ceType = iota // ceNormal includes implicits (ce == 0)
|
||||
ceContractionIndex // rune can be a start of a contraction
|
||||
ceExpansionIndex // rune expands into a sequence of collation elements
|
||||
ceDecompose // rune expands using NFKC decomposition
|
||||
)
|
||||
|
||||
func (ce Elem) ctype() ceType {
|
||||
if ce <= maxCE {
|
||||
return ceNormal
|
||||
}
|
||||
if ce <= maxContract {
|
||||
return ceContractionIndex
|
||||
} else {
|
||||
if ce <= maxExpand {
|
||||
return ceExpansionIndex
|
||||
}
|
||||
return ceDecompose
|
||||
}
|
||||
panic("should not reach here")
|
||||
return ceType(-1)
|
||||
}
|
||||
|
||||
// For normal collation elements, we assume that a collation element either has
|
||||
// a primary or non-default secondary value, not both.
|
||||
// Collation elements with a primary value are of the form
|
||||
// 01pppppp pppppppp ppppppp0 ssssssss
|
||||
// - p* is primary collation value
|
||||
// - s* is the secondary collation value
|
||||
// 00pppppp pppppppp ppppppps sssttttt, where
|
||||
// - p* is primary collation value
|
||||
// - s* offset of secondary from default value.
|
||||
// - t* is the tertiary collation value
|
||||
// 100ttttt cccccccc pppppppp pppppppp
|
||||
// - t* is the tertiary collation value
|
||||
// - c* is the canonical combining class
|
||||
// - p* is the primary collation value
|
||||
// Collation elements with a secondary value are of the form
|
||||
// 1010cccc ccccssss ssssssss tttttttt, where
|
||||
// - c* is the canonical combining class
|
||||
// - s* is the secondary collation value
|
||||
// - t* is the tertiary collation value
|
||||
// 11qqqqqq qqqqqqqq qqqqqqq0 00000000
|
||||
// - q* quaternary value
|
||||
const (
|
||||
ceTypeMask = 0xC0000000
|
||||
ceTypeMaskExt = 0xE0000000
|
||||
ceIgnoreMask = 0xF00FFFFF
|
||||
ceType1 = 0x40000000
|
||||
ceType2 = 0x00000000
|
||||
ceType3or4 = 0x80000000
|
||||
ceType4 = 0xA0000000
|
||||
ceTypeQ = 0xC0000000
|
||||
Ignore = ceType4
|
||||
firstNonPrimary = 0x80000000
|
||||
lastSpecialPrimary = 0xA0000000
|
||||
secondaryMask = 0x80000000
|
||||
hasTertiaryMask = 0x40000000
|
||||
primaryValueMask = 0x3FFFFE00
|
||||
maxPrimaryBits = 21
|
||||
compactPrimaryBits = 16
|
||||
maxSecondaryBits = 12
|
||||
maxTertiaryBits = 8
|
||||
maxCCCBits = 8
|
||||
maxSecondaryCompactBits = 8
|
||||
maxSecondaryDiffBits = 4
|
||||
maxTertiaryCompactBits = 5
|
||||
primaryShift = 9
|
||||
compactSecondaryShift = 5
|
||||
minCompactSecondary = defaultSecondary - 4
|
||||
)
|
||||
|
||||
func makeImplicitCE(primary int) Elem {
|
||||
return ceType1 | Elem(primary<<primaryShift) | defaultSecondary
|
||||
}
|
||||
|
||||
// MakeElem returns an Elem for the given values. It will return an error
|
||||
// if the given combination of values is invalid.
|
||||
func MakeElem(primary, secondary, tertiary int, ccc uint8) (Elem, error) {
|
||||
if w := primary; w >= 1<<maxPrimaryBits || w < 0 {
|
||||
return 0, fmt.Errorf("makeCE: primary weight out of bounds: %x >= %x", w, 1<<maxPrimaryBits)
|
||||
}
|
||||
if w := secondary; w >= 1<<maxSecondaryBits || w < 0 {
|
||||
return 0, fmt.Errorf("makeCE: secondary weight out of bounds: %x >= %x", w, 1<<maxSecondaryBits)
|
||||
}
|
||||
if w := tertiary; w >= 1<<maxTertiaryBits || w < 0 {
|
||||
return 0, fmt.Errorf("makeCE: tertiary weight out of bounds: %x >= %x", w, 1<<maxTertiaryBits)
|
||||
}
|
||||
ce := Elem(0)
|
||||
if primary != 0 {
|
||||
if ccc != 0 {
|
||||
if primary >= 1<<compactPrimaryBits {
|
||||
return 0, fmt.Errorf("makeCE: primary weight with non-zero CCC out of bounds: %x >= %x", primary, 1<<compactPrimaryBits)
|
||||
}
|
||||
if secondary != defaultSecondary {
|
||||
return 0, fmt.Errorf("makeCE: cannot combine non-default secondary value (%x) with non-zero CCC (%x)", secondary, ccc)
|
||||
}
|
||||
ce = Elem(tertiary << (compactPrimaryBits + maxCCCBits))
|
||||
ce |= Elem(ccc) << compactPrimaryBits
|
||||
ce |= Elem(primary)
|
||||
ce |= ceType3or4
|
||||
} else if tertiary == defaultTertiary {
|
||||
if secondary >= 1<<maxSecondaryCompactBits {
|
||||
return 0, fmt.Errorf("makeCE: secondary weight with non-zero primary out of bounds: %x >= %x", secondary, 1<<maxSecondaryCompactBits)
|
||||
}
|
||||
ce = Elem(primary<<(maxSecondaryCompactBits+1) + secondary)
|
||||
ce |= ceType1
|
||||
} else {
|
||||
d := secondary - defaultSecondary + maxSecondaryDiffBits
|
||||
if d >= 1<<maxSecondaryDiffBits || d < 0 {
|
||||
return 0, fmt.Errorf("makeCE: secondary weight diff out of bounds: %x < 0 || %x > %x", d, d, 1<<maxSecondaryDiffBits)
|
||||
}
|
||||
if tertiary >= 1<<maxTertiaryCompactBits {
|
||||
return 0, fmt.Errorf("makeCE: tertiary weight with non-zero primary out of bounds: %x > %x", tertiary, 1<<maxTertiaryCompactBits)
|
||||
}
|
||||
ce = Elem(primary<<maxSecondaryDiffBits + d)
|
||||
ce = ce<<maxTertiaryCompactBits + Elem(tertiary)
|
||||
}
|
||||
} else {
|
||||
ce = Elem(secondary<<maxTertiaryBits + tertiary)
|
||||
ce += Elem(ccc) << (maxSecondaryBits + maxTertiaryBits)
|
||||
ce |= ceType4
|
||||
}
|
||||
return ce, nil
|
||||
}
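Because colltab is an internal package, the following round-trip check could only live inside golang.org/x/text itself; it is a hedged illustration of the encoding, not code from this commit:

ce, err := MakeElem(0x28, defaultSecondary, defaultTertiary, 0)
if err != nil {
	panic(err)
}
// ce.Primary() == 0x28, ce.Secondary() == 0x20, ce.Tertiary() == 0x2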
|
||||
|
||||
// MakeQuaternary returns an Elem with the given quaternary value.
|
||||
func MakeQuaternary(v int) Elem {
|
||||
return ceTypeQ | Elem(v<<primaryShift)
|
||||
}
|
||||
|
||||
// Mask sets weights for any level smaller than l to 0.
|
||||
// The resulting Elem can be used to test for equality with
|
||||
// other Elems to which the same mask has been applied.
|
||||
func (ce Elem) Mask(l Level) uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// CCC returns the canonical combining class associated with the underlying character,
|
||||
// if applicable, or 0 otherwise.
|
||||
func (ce Elem) CCC() uint8 {
|
||||
if ce&ceType3or4 != 0 {
|
||||
if ce&ceType4 == ceType3or4 {
|
||||
return uint8(ce >> 16)
|
||||
}
|
||||
return uint8(ce >> 20)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Primary returns the primary collation weight for ce.
|
||||
func (ce Elem) Primary() int {
|
||||
if ce >= firstNonPrimary {
|
||||
if ce > lastSpecialPrimary {
|
||||
return 0
|
||||
}
|
||||
return int(uint16(ce))
|
||||
}
|
||||
return int(ce&primaryValueMask) >> primaryShift
|
||||
}
|
||||
|
||||
// Secondary returns the secondary collation weight for ce.
|
||||
func (ce Elem) Secondary() int {
|
||||
switch ce & ceTypeMask {
|
||||
case ceType1:
|
||||
return int(uint8(ce))
|
||||
case ceType2:
|
||||
return minCompactSecondary + int((ce>>compactSecondaryShift)&0xF)
|
||||
case ceType3or4:
|
||||
if ce < ceType4 {
|
||||
return defaultSecondary
|
||||
}
|
||||
return int(ce>>8) & 0xFFF
|
||||
case ceTypeQ:
|
||||
return 0
|
||||
}
|
||||
panic("should not reach here")
|
||||
}
|
||||
|
||||
// Tertiary returns the tertiary collation weight for ce.
|
||||
func (ce Elem) Tertiary() uint8 {
|
||||
if ce&hasTertiaryMask == 0 {
|
||||
if ce&ceType3or4 == 0 {
|
||||
return uint8(ce & 0x1F)
|
||||
}
|
||||
if ce&ceType4 == ceType4 {
|
||||
return uint8(ce)
|
||||
}
|
||||
return uint8(ce>>24) & 0x1F // type 2
|
||||
} else if ce&ceTypeMask == ceType1 {
|
||||
return defaultTertiary
|
||||
}
|
||||
// ce is a quaternary value.
|
||||
return 0
|
||||
}
|
||||
|
||||
func (ce Elem) updateTertiary(t uint8) Elem {
|
||||
if ce&ceTypeMask == ceType1 {
|
||||
// convert to type 4
|
||||
nce := ce & primaryValueMask
|
||||
nce |= Elem(uint8(ce)-minCompactSecondary) << compactSecondaryShift
|
||||
ce = nce
|
||||
} else if ce&ceTypeMaskExt == ceType3or4 {
|
||||
ce &= ^Elem(maxTertiary << 24)
|
||||
return ce | (Elem(t) << 24)
|
||||
} else {
|
||||
// type 2 or 4
|
||||
ce &= ^Elem(maxTertiary)
|
||||
}
|
||||
return ce | Elem(t)
|
||||
}
|
||||
|
||||
// Quaternary returns the quaternary value if explicitly specified,
|
||||
// 0 if ce == Ignore, or MaxQuaternary otherwise.
|
||||
// Quaternary values are used only for shifted variants.
|
||||
func (ce Elem) Quaternary() int {
|
||||
if ce&ceTypeMask == ceTypeQ {
|
||||
return int(ce&primaryValueMask) >> primaryShift
|
||||
} else if ce&ceIgnoreMask == Ignore {
|
||||
return 0
|
||||
}
|
||||
return MaxQuaternary
|
||||
}
|
||||
|
||||
// Weight returns the collation weight for the given level.
|
||||
func (ce Elem) Weight(l Level) int {
|
||||
switch l {
|
||||
case Primary:
|
||||
return ce.Primary()
|
||||
case Secondary:
|
||||
return ce.Secondary()
|
||||
case Tertiary:
|
||||
return int(ce.Tertiary())
|
||||
case Quaternary:
|
||||
return ce.Quaternary()
|
||||
}
|
||||
return 0 // return 0 (ignore) for undefined levels.
|
||||
}
|
||||
|
||||
// For contractions, collation elements are of the form
|
||||
// 110bbbbb bbbbbbbb iiiiiiii iiiinnnn, where
|
||||
// - n* is the size of the first node in the contraction trie.
|
||||
// - i* is the index of the first node in the contraction trie.
|
||||
// - b* is the offset into the contraction collation element table.
|
||||
// See contract.go for details on the contraction trie.
|
||||
const (
|
||||
maxNBits = 4
|
||||
maxTrieIndexBits = 12
|
||||
maxContractOffsetBits = 13
|
||||
)
|
||||
|
||||
func splitContractIndex(ce Elem) (index, n, offset int) {
|
||||
n = int(ce & (1<<maxNBits - 1))
|
||||
ce >>= maxNBits
|
||||
index = int(ce & (1<<maxTrieIndexBits - 1))
|
||||
ce >>= maxTrieIndexBits
|
||||
offset = int(ce & (1<<maxContractOffsetBits - 1))
|
||||
return
|
||||
}
|
||||
|
||||
// For expansions, Elems are of the form 11100000 00000000 bbbbbbbb bbbbbbbb,
|
||||
// where b* is the index into the expansion sequence table.
|
||||
const maxExpandIndexBits = 16
|
||||
|
||||
func splitExpandIndex(ce Elem) (index int) {
|
||||
return int(uint16(ce))
|
||||
}
|
||||
|
||||
// Some runes can be expanded using NFKD decomposition. Instead of storing the full
|
||||
// sequence of collation elements, we decompose the rune and lookup the collation
|
||||
// elements for each rune in the decomposition and modify the tertiary weights.
|
||||
// The Elem, in this case, is of the form 11110000 00000000 wwwwwwww vvvvvvvv, where
|
||||
// - v* is the replacement tertiary weight for the first rune,
|
||||
// - w* is the replacement tertiary weight for the second rune,
|
||||
// Tertiary weights of subsequent runes should be replaced with maxTertiary.
|
||||
// See http://www.unicode.org/reports/tr10/#Compatibility_Decompositions for more details.
|
||||
func splitDecompose(ce Elem) (t1, t2 uint8) {
|
||||
return uint8(ce), uint8(ce >> 8)
|
||||
}
|
||||
|
||||
const (
|
||||
// These constants were taken from http://www.unicode.org/versions/Unicode6.0.0/ch12.pdf.
|
||||
minUnified rune = 0x4E00
|
||||
maxUnified = 0x9FFF
|
||||
minCompatibility = 0xF900
|
||||
maxCompatibility = 0xFAFF
|
||||
minRare = 0x3400
|
||||
maxRare = 0x4DBF
|
||||
)
|
||||
const (
|
||||
commonUnifiedOffset = 0x10000
|
||||
rareUnifiedOffset = 0x20000 // largest rune in common is U+FAFF
|
||||
otherOffset = 0x50000 // largest rune in rare is U+2FA1D
|
||||
illegalOffset = otherOffset + int(unicode.MaxRune)
|
||||
maxPrimary = illegalOffset + 1
|
||||
)
|
||||
|
||||
// implicitPrimary returns the primary weight for a rune
|
||||
// for which there is no entry for the rune in the collation table.
|
||||
// We take a different approach from the one specified in
|
||||
// http://unicode.org/reports/tr10/#Implicit_Weights,
|
||||
// but preserve the resulting relative ordering of the runes.
|
||||
func implicitPrimary(r rune) int {
|
||||
if unicode.Is(unicode.Ideographic, r) {
|
||||
if r >= minUnified && r <= maxUnified {
|
||||
// The most common case for CJK.
|
||||
return int(r) + commonUnifiedOffset
|
||||
}
|
||||
if r >= minCompatibility && r <= maxCompatibility {
|
||||
// This will typically not hit. The DUCET explicitly specifies mappings
|
||||
// for all characters that do not decompose.
|
||||
return int(r) + commonUnifiedOffset
|
||||
}
|
||||
return int(r) + rareUnifiedOffset
|
||||
}
|
||||
return int(r) + otherOffset
|
||||
}
|
105
vendor/golang.org/x/text/internal/colltab/colltab.go
generated
vendored
@ -1,105 +0,0 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package colltab contains functionality related to collation tables.
|
||||
// It is only to be used by the collate and search packages.
|
||||
package colltab // import "golang.org/x/text/internal/colltab"
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
// MatchLang finds the index of t in tags, using a matching algorithm used for
|
||||
// collation and search. tags[0] must be language.Und; the remaining tags should
|
||||
// be sorted alphabetically.
|
||||
//
|
||||
// Language matching for collation and search is different from the matching
|
||||
// defined by language.Matcher: the (inferred) base language must be an exact
|
||||
// match for the relevant fields. For example, "gsw" should not match "de".
|
||||
// Also the parent relation is different, as a parent may have a different
|
||||
// script. So usually the parent of zh-Hant is und, whereas for MatchLang it is
|
||||
// zh.
|
||||
func MatchLang(t language.Tag, tags []language.Tag) int {
|
||||
// Canonicalize the values, including collapsing macro languages.
|
||||
t, _ = language.All.Canonicalize(t)
|
||||
|
||||
base, conf := t.Base()
|
||||
// Estimate the base language, but only use high-confidence values.
|
||||
if conf < language.High {
|
||||
// The root locale supports "search" and "standard". We assume that any
|
||||
// implementation will only use one of both.
|
||||
return 0
|
||||
}
|
||||
|
||||
// Maximize base and script and normalize the tag.
|
||||
if _, s, r := t.Raw(); (r != language.Region{}) {
|
||||
p, _ := language.Raw.Compose(base, s, r)
|
||||
// Taking the parent forces the script to be maximized.
|
||||
p = p.Parent()
|
||||
// Add back region and extensions.
|
||||
t, _ = language.Raw.Compose(p, r, t.Extensions())
|
||||
} else {
|
||||
// Set the maximized base language.
|
||||
t, _ = language.Raw.Compose(base, s, t.Extensions())
|
||||
}
|
||||
|
||||
// Find start index of the language tag.
|
||||
start := 1 + sort.Search(len(tags)-1, func(i int) bool {
|
||||
b, _, _ := tags[i+1].Raw()
|
||||
return base.String() <= b.String()
|
||||
})
|
||||
if start < len(tags) {
|
||||
if b, _, _ := tags[start].Raw(); b != base {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// Besides the base language, script and region, only the collation type and
|
||||
// the custom variant defined in the 'u' extension are used to distinguish a
|
||||
// locale.
|
||||
// Strip all variants and extensions and add back the custom variant.
|
||||
tdef, _ := language.Raw.Compose(t.Raw())
|
||||
tdef, _ = tdef.SetTypeForKey("va", t.TypeForKey("va"))
|
||||
|
||||
// First search for a specialized collation type, if present.
|
||||
try := []language.Tag{tdef}
|
||||
if co := t.TypeForKey("co"); co != "" {
|
||||
tco, _ := tdef.SetTypeForKey("co", co)
|
||||
try = []language.Tag{tco, tdef}
|
||||
}
|
||||
|
||||
for _, tx := range try {
|
||||
for ; tx != language.Und; tx = parent(tx) {
|
||||
for i, t := range tags[start:] {
|
||||
if b, _, _ := t.Raw(); b != base {
|
||||
break
|
||||
}
|
||||
if tx == t {
|
||||
return start + i
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
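An in-package illustration of the contract described above (tags[0] must be language.Und and the rest sorted alphabetically); as documented, "gsw" does not fall back to "de":

tags := []language.Tag{language.Und, language.MustParse("de"), language.MustParse("zh-Hant")}
fmt.Println(MatchLang(language.MustParse("gsw"), tags)) // 0: falls back to the root locale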
|
||||
|
||||
// parent computes the structural parent. This means inheritance may change
|
||||
// script. So, unlike the CLDR parent, parent(zh-Hant) == zh.
|
||||
func parent(t language.Tag) language.Tag {
|
||||
if t.TypeForKey("va") != "" {
|
||||
t, _ = t.SetTypeForKey("va", "")
|
||||
return t
|
||||
}
|
||||
result := language.Und
|
||||
if b, s, r := t.Raw(); (r != language.Region{}) {
|
||||
result, _ = language.Raw.Compose(b, s, t.Extensions())
|
||||
} else if (s != language.Script{}) {
|
||||
result, _ = language.Raw.Compose(b, t.Extensions())
|
||||
} else if (b != language.Base{}) {
|
||||
result, _ = language.Raw.Compose(t.Extensions())
|
||||
}
|
||||
return result
|
||||
}
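A hedged usage sketch (not part of the vendored file): how a caller inside this package, such as the collate package's table selection, might drive MatchLang. The tag values are hypothetical; the only requirements are the ones stated in the doc comment above.

// pickTable is a hypothetical helper showing the expected input shape:
// available[0] must be language.Und and the remaining tags must be sorted.
func pickTable() language.Tag {
	available := []language.Tag{
		language.Und,
		language.MustParse("de"),
		language.MustParse("zh"),
		language.MustParse("zh-Hant"),
	}
	// The requested tag is canonicalized and maximized; the parent chain used
	// here is zh-Hant -> zh, so a zh table could still be found if zh-Hant
	// were not available.
	idx := MatchLang(language.MustParse("zh-Hant-HK"), available)
	return available[idx] // index 0 (und) means no suitable match was found
}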
|
145
vendor/golang.org/x/text/internal/colltab/contract.go
generated
vendored
@ -1,145 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package colltab
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// For a description of ContractTrieSet, see text/collate/build/contract.go.
|
||||
|
||||
type ContractTrieSet []struct{ L, H, N, I uint8 }
|
||||
|
||||
// ctScanner is used to match a trie to an input sequence.
|
||||
// A contraction may match a non-contiguous sequence of bytes in an input string.
|
||||
// For example, if there is a contraction for <a, combining_ring>, it should match
|
||||
// the sequence <a, combining_cedilla, combining_ring>, as combining_cedilla does
|
||||
// not block combining_ring.
|
||||
// ctScanner does not automatically skip over non-blocking non-starters, but rather
|
||||
// retains the state of the last match and leaves it up to the user to continue
|
||||
// the match at the appropriate points.
|
||||
type ctScanner struct {
|
||||
states ContractTrieSet
|
||||
s []byte
|
||||
n int
|
||||
index int
|
||||
pindex int
|
||||
done bool
|
||||
}
|
||||
|
||||
type ctScannerString struct {
|
||||
states ContractTrieSet
|
||||
s string
|
||||
n int
|
||||
index int
|
||||
pindex int
|
||||
done bool
|
||||
}
|
||||
|
||||
func (t ContractTrieSet) scanner(index, n int, b []byte) ctScanner {
|
||||
return ctScanner{s: b, states: t[index:], n: n}
|
||||
}
|
||||
|
||||
func (t ContractTrieSet) scannerString(index, n int, str string) ctScannerString {
|
||||
return ctScannerString{s: str, states: t[index:], n: n}
|
||||
}
|
||||
|
||||
// result returns the offset i and bytes consumed p so far. If no suffix
|
||||
// matched, i and p will be 0.
|
||||
func (s *ctScanner) result() (i, p int) {
|
||||
return s.index, s.pindex
|
||||
}
|
||||
|
||||
func (s *ctScannerString) result() (i, p int) {
|
||||
return s.index, s.pindex
|
||||
}
|
||||
|
||||
const (
|
||||
final = 0
|
||||
noIndex = 0xFF
|
||||
)
|
||||
|
||||
// scan matches the longest suffix at the current location in the input
|
||||
// and returns the number of bytes consumed.
|
||||
func (s *ctScanner) scan(p int) int {
|
||||
pr := p // the p at the rune start
|
||||
str := s.s
|
||||
states, n := s.states, s.n
|
||||
for i := 0; i < n && p < len(str); {
|
||||
e := states[i]
|
||||
c := str[p]
|
||||
// TODO: a significant number of contractions are of a form that
|
||||
// cannot match discontiguous UTF-8 in a normalized string. We could let
|
||||
// a negative value of e.n mean that we can set s.done = true and avoid
|
||||
// the need for additional matches.
|
||||
if c >= e.L {
|
||||
if e.L == c {
|
||||
p++
|
||||
if e.I != noIndex {
|
||||
s.index = int(e.I)
|
||||
s.pindex = p
|
||||
}
|
||||
if e.N != final {
|
||||
i, states, n = 0, states[int(e.H)+n:], int(e.N)
|
||||
if p >= len(str) || utf8.RuneStart(str[p]) {
|
||||
s.states, s.n, pr = states, n, p
|
||||
}
|
||||
} else {
|
||||
s.done = true
|
||||
return p
|
||||
}
|
||||
continue
|
||||
} else if e.N == final && c <= e.H {
|
||||
p++
|
||||
s.done = true
|
||||
s.index = int(c-e.L) + int(e.I)
|
||||
s.pindex = p
|
||||
return p
|
||||
}
|
||||
}
|
||||
i++
|
||||
}
|
||||
return pr
|
||||
}
|
||||
|
||||
// scan is a verbatim copy of ctScanner.scan.
|
||||
func (s *ctScannerString) scan(p int) int {
|
||||
pr := p // the p at the rune start
|
||||
str := s.s
|
||||
states, n := s.states, s.n
|
||||
for i := 0; i < n && p < len(str); {
|
||||
e := states[i]
|
||||
c := str[p]
|
||||
// TODO: a significant number of contractions are of a form that
|
||||
// cannot match discontiguous UTF-8 in a normalized string. We could let
|
||||
// a negative value of e.n mean that we can set s.done = true and avoid
|
||||
// the need for additional matches.
|
||||
if c >= e.L {
|
||||
if e.L == c {
|
||||
p++
|
||||
if e.I != noIndex {
|
||||
s.index = int(e.I)
|
||||
s.pindex = p
|
||||
}
|
||||
if e.N != final {
|
||||
i, states, n = 0, states[int(e.H)+n:], int(e.N)
|
||||
if p >= len(str) || utf8.RuneStart(str[p]) {
|
||||
s.states, s.n, pr = states, n, p
|
||||
}
|
||||
} else {
|
||||
s.done = true
|
||||
return p
|
||||
}
|
||||
continue
|
||||
} else if e.N == final && c <= e.H {
|
||||
p++
|
||||
s.done = true
|
||||
s.index = int(c-e.L) + int(e.I)
|
||||
s.pindex = p
|
||||
return p
|
||||
}
|
||||
}
|
||||
i++
|
||||
}
|
||||
return pr
|
||||
}
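A minimal, hedged sketch of how the scanner above is typically driven from within this package. The index and n values are placeholders; in real use they come from splitContractIndex on a contraction collation element.

// scanContraction is a hypothetical wrapper around the scanner protocol.
func scanContraction(ct ContractTrieSet, index, n int, input []byte) (elemIndex, consumed int) {
	sc := ct.scanner(index, n, input)
	sc.scan(0)         // match the longest suffix starting at offset 0
	return sc.result() // offset into the element table and bytes consumed
}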
|
178
vendor/golang.org/x/text/internal/colltab/iter.go
generated
vendored
@ -1,178 +0,0 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package colltab
|
||||
|
||||
// An Iter incrementally converts chunks of the input text to collation
|
||||
// elements, while ensuring that the collation elements are in normalized order
|
||||
// (that is, they are in the order as if the input text were normalized first).
|
||||
type Iter struct {
|
||||
Weighter Weighter
|
||||
Elems []Elem
|
||||
// N is the number of elements in Elems that will not be reordered on
|
||||
// subsequent iterations, N <= len(Elems).
|
||||
N int
|
||||
|
||||
bytes []byte
|
||||
str string
|
||||
// Because the Elems buffer may contain collation elements that are needed
|
||||
// for look-ahead, we need two positions in the text (bytes or str): one for
|
||||
// the end position in the text for the current iteration and one for the
|
||||
// start of the next call to appendNext.
|
||||
pEnd int // end position in text corresponding to N.
|
||||
pNext int // pEnd <= pNext.
|
||||
}
|
||||
|
||||
// Reset sets the position in the current input text to p and discards any
|
||||
// results obtained so far.
|
||||
func (i *Iter) Reset(p int) {
|
||||
i.Elems = i.Elems[:0]
|
||||
i.N = 0
|
||||
i.pEnd = p
|
||||
i.pNext = p
|
||||
}
|
||||
|
||||
// Len returns the length of the input text.
|
||||
func (i *Iter) Len() int {
|
||||
if i.bytes != nil {
|
||||
return len(i.bytes)
|
||||
}
|
||||
return len(i.str)
|
||||
}
|
||||
|
||||
// Discard removes the collation elements up to N.
|
||||
func (i *Iter) Discard() {
|
||||
// TODO: change this such that only modifiers following starters will have
|
||||
// to be copied.
|
||||
i.Elems = i.Elems[:copy(i.Elems, i.Elems[i.N:])]
|
||||
i.N = 0
|
||||
}
|
||||
|
||||
// End returns the end position of the input text for which Next has returned
|
||||
// results.
|
||||
func (i *Iter) End() int {
|
||||
return i.pEnd
|
||||
}
|
||||
|
||||
// SetInput resets i to input s.
|
||||
func (i *Iter) SetInput(s []byte) {
|
||||
i.bytes = s
|
||||
i.str = ""
|
||||
i.Reset(0)
|
||||
}
|
||||
|
||||
// SetInputString resets i to input s.
|
||||
func (i *Iter) SetInputString(s string) {
|
||||
i.str = s
|
||||
i.bytes = nil
|
||||
i.Reset(0)
|
||||
}
|
||||
|
||||
func (i *Iter) done() bool {
|
||||
return i.pNext >= len(i.str) && i.pNext >= len(i.bytes)
|
||||
}
|
||||
|
||||
func (i *Iter) appendNext() bool {
|
||||
if i.done() {
|
||||
return false
|
||||
}
|
||||
var sz int
|
||||
if i.bytes == nil {
|
||||
i.Elems, sz = i.Weighter.AppendNextString(i.Elems, i.str[i.pNext:])
|
||||
} else {
|
||||
i.Elems, sz = i.Weighter.AppendNext(i.Elems, i.bytes[i.pNext:])
|
||||
}
|
||||
if sz == 0 {
|
||||
sz = 1
|
||||
}
|
||||
i.pNext += sz
|
||||
return true
|
||||
}
|
||||
|
||||
// Next appends Elems to the internal array. On each iteration, it will either
|
||||
// add starters or modifiers. In the majority of cases, an Elem with a primary
|
||||
// value > 0 will have a CCC of 0. The CCC values of collation elements are also
|
||||
// used to detect if the input string was not normalized and to adjust the
|
||||
// result accordingly.
|
||||
func (i *Iter) Next() bool {
|
||||
if i.N == len(i.Elems) && !i.appendNext() {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if the current segment starts with a starter.
|
||||
prevCCC := i.Elems[len(i.Elems)-1].CCC()
|
||||
if prevCCC == 0 {
|
||||
i.N = len(i.Elems)
|
||||
i.pEnd = i.pNext
|
||||
return true
|
||||
} else if i.Elems[i.N].CCC() == 0 {
|
||||
// set i.N to only cover part of i.Elems for which prevCCC == 0 and
|
||||
// use rest for the next call to next.
|
||||
for i.N++; i.N < len(i.Elems) && i.Elems[i.N].CCC() == 0; i.N++ {
|
||||
}
|
||||
i.pEnd = i.pNext
|
||||
return true
|
||||
}
|
||||
|
||||
// The current (partial) segment starts with modifiers. We need to collect
|
||||
// all successive modifiers to ensure that they are normalized.
|
||||
for {
|
||||
p := len(i.Elems)
|
||||
i.pEnd = i.pNext
|
||||
if !i.appendNext() {
|
||||
break
|
||||
}
|
||||
|
||||
if ccc := i.Elems[p].CCC(); ccc == 0 || len(i.Elems)-i.N > maxCombiningCharacters {
|
||||
// Leave the starter for the next iteration. This ensures that we
|
||||
// do not return sequences of collation elements that cross two
|
||||
// segments.
|
||||
//
|
||||
// TODO: handle large number of combining characters by fully
|
||||
// normalizing the input segment before iteration. This ensures
|
||||
// results are consistent across the text repo.
|
||||
i.N = p
|
||||
return true
|
||||
} else if ccc < prevCCC {
|
||||
i.doNorm(p, ccc) // should be rare, never occurs for NFD and FCC.
|
||||
} else {
|
||||
prevCCC = ccc
|
||||
}
|
||||
}
|
||||
|
||||
done := len(i.Elems) != i.N
|
||||
i.N = len(i.Elems)
|
||||
return done
|
||||
}
|
||||
|
||||
// nextNoNorm is the same as next, but does not "normalize" the collation
|
||||
// elements.
|
||||
func (i *Iter) nextNoNorm() bool {
|
||||
// TODO: remove this function. Using this instead of next does not seem
|
||||
// to improve performance in any significant way. We retain this until
|
||||
// later for evaluation purposes.
|
||||
if i.done() {
|
||||
return false
|
||||
}
|
||||
i.appendNext()
|
||||
i.N = len(i.Elems)
|
||||
return true
|
||||
}
|
||||
|
||||
const maxCombiningCharacters = 30
|
||||
|
||||
// doNorm reorders the collation elements in i.Elems.
|
||||
// It assumes that blocks of collation elements added with appendNext
|
||||
// either start and end with the same CCC or start with CCC == 0.
|
||||
// This allows for a single insertion point for the entire block.
|
||||
// The correctness of this assumption is verified in builder.go.
|
||||
func (i *Iter) doNorm(p int, ccc uint8) {
|
||||
n := len(i.Elems)
|
||||
k := p
|
||||
for p--; p > i.N && ccc < i.Elems[p-1].CCC(); p-- {
|
||||
}
|
||||
i.Elems = append(i.Elems, i.Elems[p:k]...)
|
||||
copy(i.Elems[p:], i.Elems[k:])
|
||||
i.Elems = i.Elems[:n]
|
||||
}
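A hedged sketch of the intended iteration pattern for Iter (the weighter can be any Weighter, for example a *Table from this package):

// collectElems is a hypothetical helper showing the Iter protocol: set the
// input, call Next until it returns false, and read the stable prefix
// Elems[:N] on each iteration.
func collectElems(w Weighter, s string) []Elem {
	var it Iter
	it.Weighter = w
	it.SetInputString(s)
	var out []Elem
	for it.Next() {
		out = append(out, it.Elems[:it.N]...)
		it.Discard() // drop elements that will not be reordered anymore
	}
	return out
}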
|
236
vendor/golang.org/x/text/internal/colltab/numeric.go
generated
vendored
@ -1,236 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package colltab
|
||||
|
||||
import (
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// NewNumericWeighter wraps w to replace individual digits to sort based on their
|
||||
// numeric value.
|
||||
//
|
||||
// Weighter w must have a free primary weight after the primary weight for 9.
|
||||
// If this is not the case, numeric value will sort at the same primary level
|
||||
// as the first primary sorting after 9.
|
||||
func NewNumericWeighter(w Weighter) Weighter {
|
||||
getElem := func(s string) Elem {
|
||||
elems, _ := w.AppendNextString(nil, s)
|
||||
return elems[0]
|
||||
}
|
||||
nine := getElem("9")
|
||||
|
||||
// Numbers should order before zero, but the DUCET has no room for this.
|
||||
// TODO: move before zero once we use fractional collation elements.
|
||||
ns, _ := MakeElem(nine.Primary()+1, nine.Secondary(), int(nine.Tertiary()), 0)
|
||||
|
||||
return &numericWeighter{
|
||||
Weighter: w,
|
||||
|
||||
// We assume that w sorts digits of different kinds in order of numeric
|
||||
// value and that the tertiary weight order is preserved.
|
||||
//
|
||||
// TODO: evaluate whether it is worth basing the ranges on the Elem
|
||||
// encoding itself once the move to fractional weights is complete.
|
||||
zero: getElem("0"),
|
||||
zeroSpecialLo: getElem("0"), // U+FF10 FULLWIDTH DIGIT ZERO
|
||||
zeroSpecialHi: getElem("₀"), // U+2080 SUBSCRIPT ZERO
|
||||
nine: nine,
|
||||
nineSpecialHi: getElem("₉"), // U+2089 SUBSCRIPT NINE
|
||||
numberStart: ns,
|
||||
}
|
||||
}
|
||||
|
||||
// A numericWeighter translates a stream of digits into a stream of weights
|
||||
// representing the numeric value.
|
||||
type numericWeighter struct {
|
||||
Weighter
|
||||
|
||||
// The Elems below all demarcate boundaries of specific ranges. With the
|
||||
// current element encoding digits are in two ranges: normal (default
|
||||
// tertiary value) and special. For most languages, digits have collation
|
||||
// elements in the normal range.
|
||||
//
|
||||
// Note: the range tests are very specific for the element encoding used by
|
||||
// this implementation. The tests in collate_test.go are designed to fail
|
||||
// if this code is not updated when an encoding has changed.
|
||||
|
||||
zero Elem // normal digit zero
|
||||
zeroSpecialLo Elem // special digit zero, low tertiary value
|
||||
zeroSpecialHi Elem // special digit zero, high tertiary value
|
||||
nine Elem // normal digit nine
|
||||
nineSpecialHi Elem // special digit nine
|
||||
numberStart Elem
|
||||
}
|
||||
|
||||
// AppendNext calls the namesake of the underlying weigher, but replaces single
|
||||
// digits with weights representing their value.
|
||||
func (nw *numericWeighter) AppendNext(buf []Elem, s []byte) (ce []Elem, n int) {
|
||||
ce, n = nw.Weighter.AppendNext(buf, s)
|
||||
nc := numberConverter{
|
||||
elems: buf,
|
||||
w: nw,
|
||||
b: s,
|
||||
}
|
||||
isZero, ok := nc.checkNextDigit(ce)
|
||||
if !ok {
|
||||
return ce, n
|
||||
}
|
||||
// ce might have been grown already, so take it instead of buf.
|
||||
nc.init(ce, len(buf), isZero)
|
||||
for n < len(s) {
|
||||
ce, sz := nw.Weighter.AppendNext(nc.elems, s[n:])
|
||||
nc.b = s
|
||||
n += sz
|
||||
if !nc.update(ce) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nc.result(), n
|
||||
}
|
||||
|
||||
// AppendNextString calls the namesake of the underlying weigher, but replaces
|
||||
// single digits with weights representing their value.
|
||||
func (nw *numericWeighter) AppendNextString(buf []Elem, s string) (ce []Elem, n int) {
|
||||
ce, n = nw.Weighter.AppendNextString(buf, s)
|
||||
nc := numberConverter{
|
||||
elems: buf,
|
||||
w: nw,
|
||||
s: s,
|
||||
}
|
||||
isZero, ok := nc.checkNextDigit(ce)
|
||||
if !ok {
|
||||
return ce, n
|
||||
}
|
||||
nc.init(ce, len(buf), isZero)
|
||||
for n < len(s) {
|
||||
ce, sz := nw.Weighter.AppendNextString(nc.elems, s[n:])
|
||||
nc.s = s
|
||||
n += sz
|
||||
if !nc.update(ce) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nc.result(), n
|
||||
}
|
||||
|
||||
type numberConverter struct {
|
||||
w *numericWeighter
|
||||
|
||||
elems []Elem
|
||||
nDigits int
|
||||
lenIndex int
|
||||
|
||||
s string // set if the input was of type string
|
||||
b []byte // set if the input was of type []byte
|
||||
}
|
||||
|
||||
// init completes initialization of a numberConverter and prepares it for adding
|
||||
// more digits. elems is assumed to have a digit starting at oldLen.
|
||||
func (nc *numberConverter) init(elems []Elem, oldLen int, isZero bool) {
|
||||
// Insert a marker indicating the start of a number and a placeholder
|
||||
// for the number of digits.
|
||||
if isZero {
|
||||
elems = append(elems[:oldLen], nc.w.numberStart, 0)
|
||||
} else {
|
||||
elems = append(elems, 0, 0)
|
||||
copy(elems[oldLen+2:], elems[oldLen:])
|
||||
elems[oldLen] = nc.w.numberStart
|
||||
elems[oldLen+1] = 0
|
||||
|
||||
nc.nDigits = 1
|
||||
}
|
||||
nc.elems = elems
|
||||
nc.lenIndex = oldLen + 1
|
||||
}
|
||||
|
||||
// checkNextDigit reports whether bufNew adds a single digit relative to the old
|
||||
// buffer. If it does, it also reports whether this digit is zero.
|
||||
func (nc *numberConverter) checkNextDigit(bufNew []Elem) (isZero, ok bool) {
|
||||
if len(nc.elems) >= len(bufNew) {
|
||||
return false, false
|
||||
}
|
||||
e := bufNew[len(nc.elems)]
|
||||
if e < nc.w.zeroSpecialLo || nc.w.nine < e {
|
||||
// Not a number.
|
||||
return false, false
|
||||
}
|
||||
if e < nc.w.zero {
|
||||
if e > nc.w.nineSpecialHi {
|
||||
// Not a number.
|
||||
return false, false
|
||||
}
|
||||
if !nc.isDigit() {
|
||||
return false, false
|
||||
}
|
||||
isZero = e <= nc.w.zeroSpecialHi
|
||||
} else {
|
||||
// This is the common case if we encounter a digit.
|
||||
isZero = e == nc.w.zero
|
||||
}
|
||||
// Test that the remaining added collation elements have a zero primary value.
|
||||
if n := len(bufNew) - len(nc.elems); n > 1 {
|
||||
for i := len(nc.elems) + 1; i < len(bufNew); i++ {
|
||||
if bufNew[i].Primary() != 0 {
|
||||
return false, false
|
||||
}
|
||||
}
|
||||
// In some rare cases, collation elements will encode runes in
|
||||
// unicode.No as a digit. For example Ethiopic digits (U+1369 - U+1371)
|
||||
// are not in Nd. Also some digits that clearly belong in unicode.No,
|
||||
// like U+0C78 TELUGU FRACTION DIGIT ZERO FOR ODD POWERS OF FOUR, have
|
||||
// collation elements indistinguishable from normal digits.
|
||||
// Unfortunately, this means we need to make this check for nearly all
|
||||
// non-Latin digits.
|
||||
//
|
||||
// TODO: check the performance impact and find something better if it is
|
||||
// an issue.
|
||||
if !nc.isDigit() {
|
||||
return false, false
|
||||
}
|
||||
}
|
||||
return isZero, true
|
||||
}
|
||||
|
||||
func (nc *numberConverter) isDigit() bool {
|
||||
if nc.b != nil {
|
||||
r, _ := utf8.DecodeRune(nc.b)
|
||||
return unicode.In(r, unicode.Nd)
|
||||
}
|
||||
r, _ := utf8.DecodeRuneInString(nc.s)
|
||||
return unicode.In(r, unicode.Nd)
|
||||
}
|
||||
|
||||
// We currently support a maximum of about 2M digits (the number of primary
|
||||
// values). Such numbers will compare correctly against small numbers, but their
|
||||
// comparison against other large numbers is undefined.
|
||||
//
|
||||
// TODO: define a proper fallback, such as comparing large numbers textually or
|
||||
// actually allowing numbers of unlimited length.
|
||||
//
|
||||
// TODO: cap this to a lower number (like 100) and maybe allow a larger number
|
||||
// in an option?
|
||||
const maxDigits = 1<<maxPrimaryBits - 1
|
||||
|
||||
func (nc *numberConverter) update(elems []Elem) bool {
|
||||
isZero, ok := nc.checkNextDigit(elems)
|
||||
if nc.nDigits == 0 && isZero {
|
||||
return true
|
||||
}
|
||||
nc.elems = elems
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
nc.nDigits++
|
||||
return nc.nDigits < maxDigits
|
||||
}
|
||||
|
||||
// result fills in the length element for the digit sequence and returns the
|
||||
// completed collation elements.
|
||||
func (nc *numberConverter) result() []Elem {
|
||||
e, _ := MakeElem(nc.nDigits, defaultSecondary, defaultTertiary, 0)
|
||||
nc.elems[nc.lenIndex] = e
|
||||
return nc.elems
|
||||
}
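A hedged sketch of wrapping a table so runs of digits compare by numeric value; the table argument is assumed to be any Weighter, such as a *Table.

// numericElems is a hypothetical helper: with the wrapper, "10" is emitted
// with a number-start marker and digit count, so it sorts after "9" rather
// than lexically before "2".
func numericElems(table Weighter, s string) []Elem {
	nw := NewNumericWeighter(table)
	elems, _ := nw.AppendNextString(nil, s)
	return elems
}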
|
275
vendor/golang.org/x/text/internal/colltab/table.go
generated
vendored
@ -1,275 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package colltab
|
||||
|
||||
import (
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// Table holds all collation data for a given collation ordering.
|
||||
type Table struct {
|
||||
Index Trie // main trie
|
||||
|
||||
// expansion info
|
||||
ExpandElem []uint32
|
||||
|
||||
// contraction info
|
||||
ContractTries ContractTrieSet
|
||||
ContractElem []uint32
|
||||
MaxContractLen int
|
||||
VariableTop uint32
|
||||
}
|
||||
|
||||
func (t *Table) AppendNext(w []Elem, b []byte) (res []Elem, n int) {
|
||||
return t.appendNext(w, source{bytes: b})
|
||||
}
|
||||
|
||||
func (t *Table) AppendNextString(w []Elem, s string) (res []Elem, n int) {
|
||||
return t.appendNext(w, source{str: s})
|
||||
}
|
||||
|
||||
func (t *Table) Start(p int, b []byte) int {
|
||||
// TODO: implement
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (t *Table) StartString(p int, s string) int {
|
||||
// TODO: implement
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (t *Table) Domain() []string {
|
||||
// TODO: implement
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (t *Table) Top() uint32 {
|
||||
return t.VariableTop
|
||||
}
|
||||
|
||||
type source struct {
|
||||
str string
|
||||
bytes []byte
|
||||
}
|
||||
|
||||
func (src *source) lookup(t *Table) (ce Elem, sz int) {
|
||||
if src.bytes == nil {
|
||||
return t.Index.lookupString(src.str)
|
||||
}
|
||||
return t.Index.lookup(src.bytes)
|
||||
}
|
||||
|
||||
func (src *source) tail(sz int) {
|
||||
if src.bytes == nil {
|
||||
src.str = src.str[sz:]
|
||||
} else {
|
||||
src.bytes = src.bytes[sz:]
|
||||
}
|
||||
}
|
||||
|
||||
func (src *source) nfd(buf []byte, end int) []byte {
|
||||
if src.bytes == nil {
|
||||
return norm.NFD.AppendString(buf[:0], src.str[:end])
|
||||
}
|
||||
return norm.NFD.Append(buf[:0], src.bytes[:end]...)
|
||||
}
|
||||
|
||||
func (src *source) rune() (r rune, sz int) {
|
||||
if src.bytes == nil {
|
||||
return utf8.DecodeRuneInString(src.str)
|
||||
}
|
||||
return utf8.DecodeRune(src.bytes)
|
||||
}
|
||||
|
||||
func (src *source) properties(f norm.Form) norm.Properties {
|
||||
if src.bytes == nil {
|
||||
return f.PropertiesString(src.str)
|
||||
}
|
||||
return f.Properties(src.bytes)
|
||||
}
|
||||
|
||||
// appendNext appends the weights corresponding to the next rune or
|
||||
// contraction in s. If a contraction is matched to a discontinuous
|
||||
// sequence of runes, the weights for the interstitial runes are
|
||||
// appended as well. It returns a new slice that includes the appended
|
||||
// weights and the number of bytes consumed from s.
|
||||
func (t *Table) appendNext(w []Elem, src source) (res []Elem, n int) {
|
||||
ce, sz := src.lookup(t)
|
||||
tp := ce.ctype()
|
||||
if tp == ceNormal {
|
||||
if ce == 0 {
|
||||
r, _ := src.rune()
|
||||
const (
|
||||
hangulSize = 3
|
||||
firstHangul = 0xAC00
|
||||
lastHangul = 0xD7A3
|
||||
)
|
||||
if r >= firstHangul && r <= lastHangul {
|
||||
// TODO: performance can be considerably improved here.
|
||||
n = sz
|
||||
var buf [16]byte // Used for decomposing Hangul.
|
||||
for b := src.nfd(buf[:0], hangulSize); len(b) > 0; b = b[sz:] {
|
||||
ce, sz = t.Index.lookup(b)
|
||||
w = append(w, ce)
|
||||
}
|
||||
return w, n
|
||||
}
|
||||
ce = makeImplicitCE(implicitPrimary(r))
|
||||
}
|
||||
w = append(w, ce)
|
||||
} else if tp == ceExpansionIndex {
|
||||
w = t.appendExpansion(w, ce)
|
||||
} else if tp == ceContractionIndex {
|
||||
n := 0
|
||||
src.tail(sz)
|
||||
if src.bytes == nil {
|
||||
w, n = t.matchContractionString(w, ce, src.str)
|
||||
} else {
|
||||
w, n = t.matchContraction(w, ce, src.bytes)
|
||||
}
|
||||
sz += n
|
||||
} else if tp == ceDecompose {
|
||||
// Decompose using NFKD and replace tertiary weights.
|
||||
t1, t2 := splitDecompose(ce)
|
||||
i := len(w)
|
||||
nfkd := src.properties(norm.NFKD).Decomposition()
|
||||
for p := 0; len(nfkd) > 0; nfkd = nfkd[p:] {
|
||||
w, p = t.appendNext(w, source{bytes: nfkd})
|
||||
}
|
||||
w[i] = w[i].updateTertiary(t1)
|
||||
if i++; i < len(w) {
|
||||
w[i] = w[i].updateTertiary(t2)
|
||||
for i++; i < len(w); i++ {
|
||||
w[i] = w[i].updateTertiary(maxTertiary)
|
||||
}
|
||||
}
|
||||
}
|
||||
return w, sz
|
||||
}
|
||||
|
||||
func (t *Table) appendExpansion(w []Elem, ce Elem) []Elem {
|
||||
i := splitExpandIndex(ce)
|
||||
n := int(t.ExpandElem[i])
|
||||
i++
|
||||
for _, ce := range t.ExpandElem[i : i+n] {
|
||||
w = append(w, Elem(ce))
|
||||
}
|
||||
return w
|
||||
}
|
||||
|
||||
func (t *Table) matchContraction(w []Elem, ce Elem, suffix []byte) ([]Elem, int) {
|
||||
index, n, offset := splitContractIndex(ce)
|
||||
|
||||
scan := t.ContractTries.scanner(index, n, suffix)
|
||||
buf := [norm.MaxSegmentSize]byte{}
|
||||
bufp := 0
|
||||
p := scan.scan(0)
|
||||
|
||||
if !scan.done && p < len(suffix) && suffix[p] >= utf8.RuneSelf {
|
||||
// By now we should have filtered most cases.
|
||||
p0 := p
|
||||
bufn := 0
|
||||
rune := norm.NFD.Properties(suffix[p:])
|
||||
p += rune.Size()
|
||||
if rune.LeadCCC() != 0 {
|
||||
prevCC := rune.TrailCCC()
|
||||
// A gap may only occur in the last normalization segment.
|
||||
// This also ensures that len(scan.s) < norm.MaxSegmentSize.
|
||||
if end := norm.NFD.FirstBoundary(suffix[p:]); end != -1 {
|
||||
scan.s = suffix[:p+end]
|
||||
}
|
||||
for p < len(suffix) && !scan.done && suffix[p] >= utf8.RuneSelf {
|
||||
rune = norm.NFD.Properties(suffix[p:])
|
||||
if ccc := rune.LeadCCC(); ccc == 0 || prevCC >= ccc {
|
||||
break
|
||||
}
|
||||
prevCC = rune.TrailCCC()
|
||||
if pp := scan.scan(p); pp != p {
|
||||
// Copy the interstitial runes for later processing.
|
||||
bufn += copy(buf[bufn:], suffix[p0:p])
|
||||
if scan.pindex == pp {
|
||||
bufp = bufn
|
||||
}
|
||||
p, p0 = pp, pp
|
||||
} else {
|
||||
p += rune.Size()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Append weights for the matched contraction, which may be an expansion.
|
||||
i, n := scan.result()
|
||||
ce = Elem(t.ContractElem[i+offset])
|
||||
if ce.ctype() == ceNormal {
|
||||
w = append(w, ce)
|
||||
} else {
|
||||
w = t.appendExpansion(w, ce)
|
||||
}
|
||||
// Append weights for the runes in the segment not part of the contraction.
|
||||
for b, p := buf[:bufp], 0; len(b) > 0; b = b[p:] {
|
||||
w, p = t.appendNext(w, source{bytes: b})
|
||||
}
|
||||
return w, n
|
||||
}
|
||||
|
||||
// TODO: unify the two implementations. This is best done after first simplifying
|
||||
// the algorithm taking into account the inclusion of both NFC and NFD forms
|
||||
// in the table.
|
||||
func (t *Table) matchContractionString(w []Elem, ce Elem, suffix string) ([]Elem, int) {
|
||||
index, n, offset := splitContractIndex(ce)
|
||||
|
||||
scan := t.ContractTries.scannerString(index, n, suffix)
|
||||
buf := [norm.MaxSegmentSize]byte{}
|
||||
bufp := 0
|
||||
p := scan.scan(0)
|
||||
|
||||
if !scan.done && p < len(suffix) && suffix[p] >= utf8.RuneSelf {
|
||||
// By now we should have filtered most cases.
|
||||
p0 := p
|
||||
bufn := 0
|
||||
rune := norm.NFD.PropertiesString(suffix[p:])
|
||||
p += rune.Size()
|
||||
if rune.LeadCCC() != 0 {
|
||||
prevCC := rune.TrailCCC()
|
||||
// A gap may only occur in the last normalization segment.
|
||||
// This also ensures that len(scan.s) < norm.MaxSegmentSize.
|
||||
if end := norm.NFD.FirstBoundaryInString(suffix[p:]); end != -1 {
|
||||
scan.s = suffix[:p+end]
|
||||
}
|
||||
for p < len(suffix) && !scan.done && suffix[p] >= utf8.RuneSelf {
|
||||
rune = norm.NFD.PropertiesString(suffix[p:])
|
||||
if ccc := rune.LeadCCC(); ccc == 0 || prevCC >= ccc {
|
||||
break
|
||||
}
|
||||
prevCC = rune.TrailCCC()
|
||||
if pp := scan.scan(p); pp != p {
|
||||
// Copy the interstitial runes for later processing.
|
||||
bufn += copy(buf[bufn:], suffix[p0:p])
|
||||
if scan.pindex == pp {
|
||||
bufp = bufn
|
||||
}
|
||||
p, p0 = pp, pp
|
||||
} else {
|
||||
p += rune.Size()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Append weights for the matched contraction, which may be an expansion.
|
||||
i, n := scan.result()
|
||||
ce = Elem(t.ContractElem[i+offset])
|
||||
if ce.ctype() == ceNormal {
|
||||
w = append(w, ce)
|
||||
} else {
|
||||
w = t.appendExpansion(w, ce)
|
||||
}
|
||||
// Append weights for the runes in the segment not part of the contraction.
|
||||
for b, p := buf[:bufp], 0; len(b) > 0; b = b[p:] {
|
||||
w, p = t.appendNext(w, source{bytes: b})
|
||||
}
|
||||
return w, n
|
||||
}
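The methods above make *Table satisfy the Weighter interface declared in weighter.go below; a compile-time assertion of that relationship would be a one-liner:

var _ Weighter = (*Table)(nil) // *Table provides AppendNext, Start, Domain, Top, ...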
|
159
vendor/golang.org/x/text/internal/colltab/trie.go
generated
vendored
@ -1,159 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The trie in this file is used to associate the first full character in a
|
||||
// UTF-8 string to a collation element. All but the last byte in a UTF-8 byte
|
||||
// sequence are used to lookup offsets in the index table to be used for the
|
||||
// next byte. The last byte is used to index into a table of collation elements.
|
||||
// For a full description, see go.text/collate/build/trie.go.
|
||||
|
||||
package colltab
|
||||
|
||||
const blockSize = 64
|
||||
|
||||
type Trie struct {
|
||||
Index0 []uint16 // index for first byte (0xC0-0xFF)
|
||||
Values0 []uint32 // index for first byte (0x00-0x7F)
|
||||
Index []uint16
|
||||
Values []uint32
|
||||
}
|
||||
|
||||
const (
|
||||
t1 = 0x00 // 0000 0000
|
||||
tx = 0x80 // 1000 0000
|
||||
t2 = 0xC0 // 1100 0000
|
||||
t3 = 0xE0 // 1110 0000
|
||||
t4 = 0xF0 // 1111 0000
|
||||
t5 = 0xF8 // 1111 1000
|
||||
t6 = 0xFC // 1111 1100
|
||||
te = 0xFE // 1111 1110
|
||||
)
|
||||
|
||||
func (t *Trie) lookupValue(n uint16, b byte) Elem {
|
||||
return Elem(t.Values[int(n)<<6+int(b)])
|
||||
}
|
||||
|
||||
// lookup returns the trie value for the first UTF-8 encoding in s and
|
||||
// the width in bytes of this encoding. The size will be 0 if s does not
|
||||
// hold enough bytes to complete the encoding. len(s) must be greater than 0.
|
||||
func (t *Trie) lookup(s []byte) (v Elem, sz int) {
|
||||
c0 := s[0]
|
||||
switch {
|
||||
case c0 < tx:
|
||||
return Elem(t.Values0[c0]), 1
|
||||
case c0 < t2:
|
||||
return 0, 1
|
||||
case c0 < t3:
|
||||
if len(s) < 2 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.Index0[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
return t.lookupValue(i, c1), 2
|
||||
case c0 < t4:
|
||||
if len(s) < 3 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.Index0[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
o := int(i)<<6 + int(c1)
|
||||
i = t.Index[o]
|
||||
c2 := s[2]
|
||||
if c2 < tx || t2 <= c2 {
|
||||
return 0, 2
|
||||
}
|
||||
return t.lookupValue(i, c2), 3
|
||||
case c0 < t5:
|
||||
if len(s) < 4 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.Index0[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
o := int(i)<<6 + int(c1)
|
||||
i = t.Index[o]
|
||||
c2 := s[2]
|
||||
if c2 < tx || t2 <= c2 {
|
||||
return 0, 2
|
||||
}
|
||||
o = int(i)<<6 + int(c2)
|
||||
i = t.Index[o]
|
||||
c3 := s[3]
|
||||
if c3 < tx || t2 <= c3 {
|
||||
return 0, 3
|
||||
}
|
||||
return t.lookupValue(i, c3), 4
|
||||
}
|
||||
// Illegal rune
|
||||
return 0, 1
|
||||
}
|
||||
|
||||
// The body of lookupString is a verbatim copy of that of lookup.
|
||||
func (t *Trie) lookupString(s string) (v Elem, sz int) {
|
||||
c0 := s[0]
|
||||
switch {
|
||||
case c0 < tx:
|
||||
return Elem(t.Values0[c0]), 1
|
||||
case c0 < t2:
|
||||
return 0, 1
|
||||
case c0 < t3:
|
||||
if len(s) < 2 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.Index0[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
return t.lookupValue(i, c1), 2
|
||||
case c0 < t4:
|
||||
if len(s) < 3 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.Index0[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
o := int(i)<<6 + int(c1)
|
||||
i = t.Index[o]
|
||||
c2 := s[2]
|
||||
if c2 < tx || t2 <= c2 {
|
||||
return 0, 2
|
||||
}
|
||||
return t.lookupValue(i, c2), 3
|
||||
case c0 < t5:
|
||||
if len(s) < 4 {
|
||||
return 0, 0
|
||||
}
|
||||
i := t.Index0[c0]
|
||||
c1 := s[1]
|
||||
if c1 < tx || t2 <= c1 {
|
||||
return 0, 1
|
||||
}
|
||||
o := int(i)<<6 + int(c1)
|
||||
i = t.Index[o]
|
||||
c2 := s[2]
|
||||
if c2 < tx || t2 <= c2 {
|
||||
return 0, 2
|
||||
}
|
||||
o = int(i)<<6 + int(c2)
|
||||
i = t.Index[o]
|
||||
c3 := s[3]
|
||||
if c3 < tx || t2 <= c3 {
|
||||
return 0, 3
|
||||
}
|
||||
return t.lookupValue(i, c3), 4
|
||||
}
|
||||
// Illegal rune
|
||||
return 0, 1
|
||||
}
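A hedged sketch of the return contract of lookup when walking a byte slice; the helper name is hypothetical and the resulting Elems depend entirely on the table data.

// sizes illustrates how sz reports the UTF-8 width consumed, and is 0 only
// when the input ends in the middle of a multi-byte sequence.
func sizes(t *Trie, s []byte) []int {
	var out []int
	for len(s) > 0 {
		_, sz := t.lookup(s)
		if sz == 0 { // incomplete encoding at the end of the input
			break
		}
		out = append(out, sz)
		s = s[sz:]
	}
	return out
}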
|
31
vendor/golang.org/x/text/internal/colltab/weighter.go
generated
vendored
@ -1,31 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package colltab // import "golang.org/x/text/internal/colltab"
|
||||
|
||||
// A Weighter can be used as a source for Collator and Searcher.
|
||||
type Weighter interface {
|
||||
// Start finds the start of the segment that includes position p.
|
||||
Start(p int, b []byte) int
|
||||
|
||||
// StartString finds the start of the segment that includes position p.
|
||||
StartString(p int, s string) int
|
||||
|
||||
// AppendNext appends Elems to buf corresponding to the longest match
|
||||
// of a single character or contraction from the start of s.
|
||||
// It returns the new buf and the number of bytes consumed.
|
||||
AppendNext(buf []Elem, s []byte) (ce []Elem, n int)
|
||||
|
||||
// AppendNextString appends Elems to buf corresponding to the longest match
|
||||
// of a single character or contraction from the start of s.
|
||||
// It returns the new buf and the number of bytes consumed.
|
||||
AppendNextString(buf []Elem, s string) (ce []Elem, n int)
|
||||
|
||||
// Domain returns a slice of all single characters and contractions for which
|
||||
// collation elements are defined in this table.
|
||||
Domain() []string
|
||||
|
||||
// Top returns the highest variable primary value.
|
||||
Top() uint32
|
||||
}
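A hedged sketch of how Start is meant to be combined with AppendNext so that iteration never begins in the middle of a segment; the helper is hypothetical.

// elemsFrom rewinds p to the start of the segment containing p, then appends
// the collation elements for the longest match from that point.
func elemsFrom(w Weighter, b []byte, p int) ([]Elem, int) {
	p = w.Start(p, b)
	elems, n := w.AppendNext(nil, b[p:])
	return elems, p + n
}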
|
369
vendor/golang.org/x/text/internal/gen/code.go
generated
vendored
@ -1,369 +0,0 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// This file contains utilities for generating code.
|
||||
|
||||
// TODO: other write methods like:
|
||||
// - slices, maps, types, etc.
|
||||
|
||||
// CodeWriter is a utility for writing structured code. It computes the content
|
||||
// hash and size of written content. It ensures there are newlines between
|
||||
// written code blocks.
|
||||
type CodeWriter struct {
|
||||
buf bytes.Buffer
|
||||
Size int
|
||||
Hash hash.Hash32 // content hash
|
||||
gob *gob.Encoder
|
||||
// For comments we skip the usual one-line separator if they are followed by
|
||||
// a code block.
|
||||
skipSep bool
|
||||
}
|
||||
|
||||
func (w *CodeWriter) Write(p []byte) (n int, err error) {
|
||||
return w.buf.Write(p)
|
||||
}
|
||||
|
||||
// NewCodeWriter returns a new CodeWriter.
|
||||
func NewCodeWriter() *CodeWriter {
|
||||
h := fnv.New32()
|
||||
return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}
|
||||
}
|
||||
|
||||
// WriteGoFile appends the buffer with the total size of all created structures
|
||||
// and writes it as a Go file to the given file with the given package name.
|
||||
func (w *CodeWriter) WriteGoFile(filename, pkg string) {
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err = w.WriteGo(f, pkg, ""); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteVersionedGoFile appends the buffer with the total size of all created
|
||||
// structures and writes it as a Go file to the given file with the given
|
||||
// package name and build tags for the current Unicode version.
|
||||
func (w *CodeWriter) WriteVersionedGoFile(filename, pkg string) {
|
||||
tags := buildTags()
|
||||
if tags != "" {
|
||||
filename = insertVersion(filename, UnicodeVersion())
|
||||
}
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err = w.WriteGo(f, pkg, tags); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteGo appends the buffer with the total size of all created structures and
|
||||
// writes it as a Go file to the given writer with the given package name.
|
||||
func (w *CodeWriter) WriteGo(out io.Writer, pkg, tags string) (n int, err error) {
|
||||
sz := w.Size
|
||||
w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
|
||||
defer w.buf.Reset()
|
||||
return WriteGo(out, pkg, tags, w.buf.Bytes())
|
||||
}
|
||||
|
||||
func (w *CodeWriter) printf(f string, x ...interface{}) {
|
||||
fmt.Fprintf(w, f, x...)
|
||||
}
|
||||
|
||||
func (w *CodeWriter) insertSep() {
|
||||
if w.skipSep {
|
||||
w.skipSep = false
|
||||
return
|
||||
}
|
||||
// Use at least two newlines to ensure a blank space between the previous
|
||||
// block. WriteGoFile will remove extraneous newlines.
|
||||
w.printf("\n\n")
|
||||
}
|
||||
|
||||
// WriteComment writes a comment block. All line starts are prefixed with "//".
|
||||
// Initial empty lines are gobbled. The indentation for the first line is
|
||||
// stripped from consecutive lines.
|
||||
func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
|
||||
s := fmt.Sprintf(comment, args...)
|
||||
s = strings.Trim(s, "\n")
|
||||
|
||||
// Use at least two newlines to ensure a blank space between the previous
|
||||
// block. WriteGoFile will remove extraneous newlines.
|
||||
w.printf("\n\n// ")
|
||||
w.skipSep = true
|
||||
|
||||
// strip first indent level.
|
||||
sep := "\n"
|
||||
for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
|
||||
sep += s[:1]
|
||||
}
|
||||
|
||||
strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)
|
||||
|
||||
w.printf("\n")
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeSizeInfo(size int) {
|
||||
w.printf("// Size: %d bytes\n", size)
|
||||
}
|
||||
|
||||
// WriteConst writes a constant of the given name and value.
|
||||
func (w *CodeWriter) WriteConst(name string, x interface{}) {
|
||||
w.insertSep()
|
||||
v := reflect.ValueOf(x)
|
||||
|
||||
switch v.Type().Kind() {
|
||||
case reflect.String:
|
||||
w.printf("const %s %s = ", name, typeName(x))
|
||||
w.WriteString(v.String())
|
||||
w.printf("\n")
|
||||
default:
|
||||
w.printf("const %s = %#v\n", name, x)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteVar writes a variable of the given name and value.
|
||||
func (w *CodeWriter) WriteVar(name string, x interface{}) {
|
||||
w.insertSep()
|
||||
v := reflect.ValueOf(x)
|
||||
oldSize := w.Size
|
||||
sz := int(v.Type().Size())
|
||||
w.Size += sz
|
||||
|
||||
switch v.Type().Kind() {
|
||||
case reflect.String:
|
||||
w.printf("var %s %s = ", name, typeName(x))
|
||||
w.WriteString(v.String())
|
||||
case reflect.Struct:
|
||||
w.gob.Encode(x)
|
||||
fallthrough
|
||||
case reflect.Slice, reflect.Array:
|
||||
w.printf("var %s = ", name)
|
||||
w.writeValue(v)
|
||||
w.writeSizeInfo(w.Size - oldSize)
|
||||
default:
|
||||
w.printf("var %s %s = ", name, typeName(x))
|
||||
w.gob.Encode(x)
|
||||
w.writeValue(v)
|
||||
w.writeSizeInfo(w.Size - oldSize)
|
||||
}
|
||||
w.printf("\n")
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeValue(v reflect.Value) {
|
||||
x := v.Interface()
|
||||
switch v.Kind() {
|
||||
case reflect.String:
|
||||
w.WriteString(v.String())
|
||||
case reflect.Array:
|
||||
// Don't double count: callers of WriteArray count on the size being
|
||||
// added, so we need to discount it here.
|
||||
w.Size -= int(v.Type().Size())
|
||||
w.writeSlice(x, true)
|
||||
case reflect.Slice:
|
||||
w.writeSlice(x, false)
|
||||
case reflect.Struct:
|
||||
w.printf("%s{\n", typeName(v.Interface()))
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
w.printf("%s: ", t.Field(i).Name)
|
||||
w.writeValue(v.Field(i))
|
||||
w.printf(",\n")
|
||||
}
|
||||
w.printf("}")
|
||||
default:
|
||||
w.printf("%#v", x)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteString writes a string literal.
|
||||
func (w *CodeWriter) WriteString(s string) {
|
||||
s = strings.Replace(s, `\`, `\\`, -1)
|
||||
io.WriteString(w.Hash, s) // content hash
|
||||
w.Size += len(s)
|
||||
|
||||
const maxInline = 40
|
||||
if len(s) <= maxInline {
|
||||
w.printf("%q", s)
|
||||
return
|
||||
}
|
||||
|
||||
// We will render the string as a multi-line string.
|
||||
const maxWidth = 80 - 4 - len(`"`) - len(`" +`)
|
||||
|
||||
// When starting on its own line, go fmt indents line 2+ an extra level.
|
||||
n, max := maxWidth, maxWidth-4
|
||||
|
||||
// As per https://golang.org/issue/18078, the compiler has trouble
|
||||
// compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
|
||||
// for large N. We insert redundant, explicit parentheses to work around
|
||||
// that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
|
||||
// ... + s127) + etc + (etc + ... + sN).
|
||||
explicitParens, extraComment := len(s) > 128*1024, ""
|
||||
if explicitParens {
|
||||
w.printf(`(`)
|
||||
extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078"
|
||||
}
|
||||
|
||||
// Print "" +\n, if a string does not start on its own line.
|
||||
b := w.buf.Bytes()
|
||||
if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' {
|
||||
w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment)
|
||||
n, max = maxWidth, maxWidth
|
||||
}
|
||||
|
||||
w.printf(`"`)
|
||||
|
||||
for sz, p, nLines := 0, 0, 0; p < len(s); {
|
||||
var r rune
|
||||
r, sz = utf8.DecodeRuneInString(s[p:])
|
||||
out := s[p : p+sz]
|
||||
chars := 1
|
||||
if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' {
|
||||
switch sz {
|
||||
case 1:
|
||||
out = fmt.Sprintf("\\x%02x", s[p])
|
||||
case 2, 3:
|
||||
out = fmt.Sprintf("\\u%04x", r)
|
||||
case 4:
|
||||
out = fmt.Sprintf("\\U%08x", r)
|
||||
}
|
||||
chars = len(out)
|
||||
}
|
||||
if n -= chars; n < 0 {
|
||||
nLines++
|
||||
if explicitParens && nLines&63 == 63 {
|
||||
w.printf("\") + (\"")
|
||||
}
|
||||
w.printf("\" +\n\"")
|
||||
n = max - len(out)
|
||||
}
|
||||
w.printf("%s", out)
|
||||
p += sz
|
||||
}
|
||||
w.printf(`"`)
|
||||
if explicitParens {
|
||||
w.printf(`)`)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteSlice writes a slice value.
|
||||
func (w *CodeWriter) WriteSlice(x interface{}) {
|
||||
w.writeSlice(x, false)
|
||||
}
|
||||
|
||||
// WriteArray writes an array value.
|
||||
func (w *CodeWriter) WriteArray(x interface{}) {
|
||||
w.writeSlice(x, true)
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeSlice(x interface{}, isArray bool) {
|
||||
v := reflect.ValueOf(x)
|
||||
w.gob.Encode(v.Len())
|
||||
w.Size += v.Len() * int(v.Type().Elem().Size())
|
||||
name := typeName(x)
|
||||
if isArray {
|
||||
name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:])
|
||||
}
|
||||
if isArray {
|
||||
w.printf("%s{\n", name)
|
||||
} else {
|
||||
w.printf("%s{ // %d elements\n", name, v.Len())
|
||||
}
|
||||
|
||||
switch kind := v.Type().Elem().Kind(); kind {
|
||||
case reflect.String:
|
||||
for _, s := range x.([]string) {
|
||||
w.WriteString(s)
|
||||
w.printf(",\n")
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
// nLine and nBlock are the number of elements per line and block.
|
||||
nLine, nBlock, format := 8, 64, "%d,"
|
||||
switch kind {
|
||||
case reflect.Uint8:
|
||||
format = "%#02x,"
|
||||
case reflect.Uint16:
|
||||
format = "%#04x,"
|
||||
case reflect.Uint32:
|
||||
nLine, nBlock, format = 4, 32, "%#08x,"
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
nLine, nBlock, format = 4, 32, "%#016x,"
|
||||
case reflect.Int8:
|
||||
nLine = 16
|
||||
}
|
||||
n := nLine
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i%nBlock == 0 && v.Len() > nBlock {
|
||||
w.printf("// Entry %X - %X\n", i, i+nBlock-1)
|
||||
}
|
||||
x := v.Index(i).Interface()
|
||||
w.gob.Encode(x)
|
||||
w.printf(format, x)
|
||||
if n--; n == 0 {
|
||||
n = nLine
|
||||
w.printf("\n")
|
||||
}
|
||||
}
|
||||
w.printf("\n")
|
||||
case reflect.Struct:
|
||||
zero := reflect.Zero(v.Type().Elem()).Interface()
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
x := v.Index(i).Interface()
|
||||
w.gob.EncodeValue(v)
|
||||
if !reflect.DeepEqual(zero, x) {
|
||||
line := fmt.Sprintf("%#v,\n", x)
|
||||
line = line[strings.IndexByte(line, '{'):]
|
||||
w.printf("%d: ", i)
|
||||
w.printf(line)
|
||||
}
|
||||
}
|
||||
case reflect.Array:
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
w.printf("%d: %#v,\n", i, v.Index(i).Interface())
|
||||
}
|
||||
default:
|
||||
panic("gen: slice elem type not supported")
|
||||
}
|
||||
w.printf("}")
|
||||
}
|
||||
|
||||
// WriteType writes a definition of the type of the given value and returns the
|
||||
// type name.
|
||||
func (w *CodeWriter) WriteType(x interface{}) string {
|
||||
t := reflect.TypeOf(x)
|
||||
w.printf("type %s struct {\n", t.Name())
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type)
|
||||
}
|
||||
w.printf("}\n")
|
||||
return t.Name()
|
||||
}
|
||||
|
||||
// typeName returns the name of the go type of x.
|
||||
func typeName(x interface{}) string {
|
||||
t := reflect.ValueOf(x).Type()
|
||||
return strings.Replace(fmt.Sprint(t), "main.", "", 1)
|
||||
}
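A hedged sketch of the typical generator flow with CodeWriter; the file name, package name, and table contents are placeholders.

// writeTables is a hypothetical generator body: values written through the
// CodeWriter are hashed and size-counted, then emitted as a gofmt'ed file.
func writeTables(values []uint16) {
	w := NewCodeWriter()
	defer w.WriteGoFile("tables.go", "mypkg") // hypothetical output file and package
	w.WriteComment("Generated tables; do not edit by hand.")
	w.WriteConst("numValues", len(values))
	w.WriteVar("valueTable", values)
}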
|
333
vendor/golang.org/x/text/internal/gen/gen.go
generated
vendored
@ -1,333 +0,0 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gen contains common code for the various code generation tools in the
|
||||
// text repository. Its usage ensures consistency between tools.
|
||||
//
|
||||
// This package defines command line flags that are common to most generation
|
||||
// tools. The flags allow for specifying specific Unicode and CLDR versions
|
||||
// in the public Unicode data repository (http://www.unicode.org/Public).
|
||||
//
|
||||
// A local Unicode data mirror can be set through the flag -local or the
|
||||
// environment variable UNICODE_DIR. The former takes precedence. The local
|
||||
// directory should follow the same structure as the public repository.
|
||||
//
|
||||
// IANA data can also optionally be mirrored by putting it in the iana directory
|
||||
// rooted at the top of the local mirror. Beware, though, that IANA data is not
|
||||
// versioned. So it is up to the developer to use the right version.
|
||||
package gen // import "golang.org/x/text/internal/gen"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/format"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/text/unicode/cldr"
|
||||
)
|
||||
|
||||
var (
|
||||
url = flag.String("url",
|
||||
"http://www.unicode.org/Public",
|
||||
"URL of Unicode database directory")
|
||||
iana = flag.String("iana",
|
||||
"http://www.iana.org",
|
||||
"URL of the IANA repository")
|
||||
unicodeVersion = flag.String("unicode",
|
||||
getEnv("UNICODE_VERSION", unicode.Version),
|
||||
"unicode version to use")
|
||||
cldrVersion = flag.String("cldr",
|
||||
getEnv("CLDR_VERSION", cldr.Version),
|
||||
"cldr version to use")
|
||||
)
|
||||
|
||||
func getEnv(name, def string) string {
|
||||
if v := os.Getenv(name); v != "" {
|
||||
return v
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
// Init performs common initialization for a gen command. It parses the flags
|
||||
// and sets up the standard logging parameters.
|
||||
func Init() {
|
||||
log.SetPrefix("")
|
||||
log.SetFlags(log.Lshortfile)
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
`
|
||||
|
||||
// UnicodeVersion reports the requested Unicode version.
|
||||
func UnicodeVersion() string {
|
||||
return *unicodeVersion
|
||||
}
|
||||
|
||||
// CLDRVersion reports the requested CLDR version.
|
||||
func CLDRVersion() string {
|
||||
return *cldrVersion
|
||||
}
|
||||
|
||||
var tags = []struct{ version, buildTags string }{
|
||||
{"10.0.0", "go1.10"},
|
||||
{"", "!go1.10"},
|
||||
}
|
||||
|
||||
// buildTags reports the build tags used for the current Unicode version.
|
||||
func buildTags() string {
|
||||
v := UnicodeVersion()
|
||||
for _, x := range tags {
|
||||
// We should do a numeric comparison, but including the collate package
|
||||
// would create an import cycle. We approximate it by assuming that
|
||||
// longer version strings are later.
|
||||
if len(x.version) <= len(v) {
|
||||
return x.buildTags
|
||||
}
|
||||
if len(x.version) == len(v) && x.version <= v {
|
||||
return x.buildTags
|
||||
}
|
||||
}
|
||||
return tags[0].buildTags
|
||||
}
|
||||
|
||||
// IsLocal reports whether data files are available locally.
|
||||
func IsLocal() bool {
|
||||
dir, err := localReadmeFile()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, err = os.Stat(dir); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// OpenUCDFile opens the requested UCD file. The file is specified relative to
|
||||
// the public Unicode root directory. It will call log.Fatal if there are any
|
||||
// errors.
|
||||
func OpenUCDFile(file string) io.ReadCloser {
|
||||
return openUnicode(path.Join(*unicodeVersion, "ucd", file))
|
||||
}
|
||||
|
||||
// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
|
||||
// are any errors.
|
||||
func OpenCLDRCoreZip() io.ReadCloser {
|
||||
return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
|
||||
}
|
||||
|
||||
// OpenUnicodeFile opens the requested file of the requested category from the
|
||||
// root of the Unicode data archive. The file is specified relative to the
|
||||
// public Unicode root directory. If version is "", it will use the default
|
||||
// Unicode version. It will call log.Fatal if there are any errors.
|
||||
func OpenUnicodeFile(category, version, file string) io.ReadCloser {
|
||||
if version == "" {
|
||||
version = UnicodeVersion()
|
||||
}
|
||||
return openUnicode(path.Join(category, version, file))
|
||||
}
|
||||
|
||||
// OpenIANAFile opens the requested IANA file. The file is specified relative
|
||||
// to the IANA root, which is typically either http://www.iana.org or the
|
||||
// iana directory in the local mirror. It will call log.Fatal if there are any
|
||||
// errors.
|
||||
func OpenIANAFile(path string) io.ReadCloser {
|
||||
return Open(*iana, "iana", path)
|
||||
}
|
||||
|
||||
var (
|
||||
dirMutex sync.Mutex
|
||||
localDir string
|
||||
)
|
||||
|
||||
const permissions = 0755
|
||||
|
||||
func localReadmeFile() (string, error) {
|
||||
p, err := build.Import("golang.org/x/text", "", build.FindOnly)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Could not locate package: %v", err)
|
||||
}
|
||||
return filepath.Join(p.Dir, "DATA", "README"), nil
|
||||
}
|
||||
|
||||
func getLocalDir() string {
|
||||
dirMutex.Lock()
|
||||
defer dirMutex.Unlock()
|
||||
|
||||
readme, err := localReadmeFile()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
dir := filepath.Dir(readme)
|
||||
if _, err := os.Stat(readme); err != nil {
|
||||
if err := os.MkdirAll(dir, permissions); err != nil {
|
||||
log.Fatalf("Could not create directory: %v", err)
|
||||
}
|
||||
ioutil.WriteFile(readme, []byte(readmeTxt), permissions)
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.
|
||||
|
||||
This directory contains downloaded files used to generate the various tables
|
||||
in the golang.org/x/text subrepo.
|
||||
|
||||
Note that the language subtag repo (iana/assignments/language-subtag-registry)
|
||||
// and all other files in the iana subdirectory are not versioned and will need
|
||||
to be periodically manually updated. The easiest way to do this is to remove
|
||||
the entire iana directory. This is mostly of concern when updating the language
|
||||
package.
|
||||
`
|
||||
|
||||
// Open opens subdir/path if a local directory is specified and the file exists,
|
||||
// where subdir is a directory relative to the local root, or fetches it from
|
||||
// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
|
||||
func Open(urlRoot, subdir, path string) io.ReadCloser {
|
||||
file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path))
|
||||
return open(file, urlRoot, path)
|
||||
}
|
||||
|
||||
func openUnicode(path string) io.ReadCloser {
|
||||
file := filepath.Join(getLocalDir(), filepath.FromSlash(path))
|
||||
return open(file, *url, path)
|
||||
}
|
||||
|
||||
// TODO: automatically periodically update non-versioned files.
|
||||
|
||||
func open(file, urlRoot, path string) io.ReadCloser {
|
||||
if f, err := os.Open(file); err == nil {
|
||||
return f
|
||||
}
|
||||
r := get(urlRoot, path)
|
||||
defer r.Close()
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not download file: %v", err)
|
||||
}
|
||||
os.MkdirAll(filepath.Dir(file), permissions)
|
||||
if err := ioutil.WriteFile(file, b, permissions); err != nil {
|
||||
log.Fatalf("Could not create file: %v", err)
|
||||
}
|
||||
return ioutil.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
|
||||
func get(root, path string) io.ReadCloser {
|
||||
url := root + "/" + path
|
||||
fmt.Printf("Fetching %s...", url)
|
||||
defer fmt.Println(" done.")
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
log.Fatalf("HTTP GET: %v", err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
log.Fatalf("Bad GET status for %q: %q", url, resp.Status)
|
||||
}
|
||||
return resp.Body
|
||||
}
|
||||
|
||||
// TODO: use Write*Version in all applicable packages.
|
||||
|
||||
// WriteUnicodeVersion writes a constant for the Unicode version from which the
|
||||
// tables are generated.
|
||||
func WriteUnicodeVersion(w io.Writer) {
|
||||
fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n")
|
||||
fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion())
|
||||
}
|
||||
|
||||
// WriteCLDRVersion writes a constant for the CLDR version from which the
|
||||
// tables are generated.
|
||||
func WriteCLDRVersion(w io.Writer) {
|
||||
fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n")
|
||||
fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion())
|
||||
}
|
||||
|
||||
// WriteGoFile prepends a standard file comment and package statement to the
|
||||
// given bytes, applies gofmt, and writes them to a file with the given name.
|
||||
// It will call log.Fatal if there are any errors.
|
||||
func WriteGoFile(filename, pkg string, b []byte) {
|
||||
w, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer w.Close()
|
||||
if _, err = WriteGo(w, pkg, "", b); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
func insertVersion(filename, version string) string {
|
||||
suffix := ".go"
|
||||
if strings.HasSuffix(filename, "_test.go") {
|
||||
suffix = "_test.go"
|
||||
}
|
||||
return fmt.Sprint(filename[:len(filename)-len(suffix)], version, suffix)
|
||||
}
|
||||
|
||||
// WriteVersionedGoFile prepends a standard file comment, adds build tags to
|
||||
// version the file for the current Unicode version, and package statement to
|
||||
// the given bytes, applies gofmt, and writes them to a file with the given
|
||||
// name. It will call log.Fatal if there are any errors.
|
||||
func WriteVersionedGoFile(filename, pkg string, b []byte) {
|
||||
tags := buildTags()
|
||||
if tags != "" {
|
||||
filename = insertVersion(filename, UnicodeVersion())
|
||||
}
|
||||
w, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer w.Close()
|
||||
if _, err = WriteGo(w, pkg, tags, b); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteGo prepends a standard file comment and package statement to the given
|
||||
// bytes, applies gofmt, and writes them to w.
|
||||
func WriteGo(w io.Writer, pkg, tags string, b []byte) (n int, err error) {
|
||||
src := []byte(header)
|
||||
if tags != "" {
|
||||
src = append(src, fmt.Sprintf("// +build %s\n\n", tags)...)
|
||||
}
|
||||
src = append(src, fmt.Sprintf("package %s\n\n", pkg)...)
|
||||
src = append(src, b...)
|
||||
formatted, err := format.Source(src)
|
||||
if err != nil {
|
||||
// Print the generated code even in case of an error so that the
|
||||
// returned error can be meaningfully interpreted.
|
||||
n, _ = w.Write(src)
|
||||
return n, err
|
||||
}
|
||||
return w.Write(formatted)
|
||||
}
|
||||
|
||||
// Repackage rewrites a Go file from belonging to package main to belonging to
|
||||
// the given package.
|
||||
func Repackage(inFile, outFile, pkg string) {
|
||||
src, err := ioutil.ReadFile(inFile)
|
||||
if err != nil {
|
||||
log.Fatalf("reading %s: %v", inFile, err)
|
||||
}
|
||||
const toDelete = "package main\n\n"
|
||||
i := bytes.Index(src, []byte(toDelete))
|
||||
if i < 0 {
|
||||
log.Fatalf("Could not find %q in %s.", toDelete, inFile)
|
||||
}
|
||||
w := &bytes.Buffer{}
|
||||
w.Write(src[i+len(toDelete):])
|
||||
WriteGoFile(outFile, pkg, w.Bytes())
|
||||
}
|
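The helpers above (WriteUnicodeVersion, WriteGoFile, WriteGo, Repackage) are table-generation support code. Below is a minimal sketch of how a generator might use them; it assumes they live in golang.org/x/text/internal/gen (the package clause is outside this hunk), so the import only resolves inside the x/text tree, and the file name and table contents are invented for illustration.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/text/internal/gen"
)

func main() {
	var buf bytes.Buffer
	// Record the Unicode version the tables were derived from.
	gen.WriteUnicodeVersion(&buf)
	// Emit the actual table; only the declarations are needed here.
	fmt.Fprintln(&buf, "var exampleTable = []uint16{1, 2, 3}")
	// WriteGoFile prepends the standard header and package clause, runs gofmt,
	// and calls log.Fatal on any error.
	gen.WriteGoFile("tables.go", "examplepkg", buf.Bytes())
}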
100
vendor/golang.org/x/text/internal/tag/tag.go
generated
vendored
100
vendor/golang.org/x/text/internal/tag/tag.go
generated
vendored
@ -1,100 +0,0 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tag contains functionality handling tags and related data.
|
||||
package tag // import "golang.org/x/text/internal/tag"
|
||||
|
||||
import "sort"
|
||||
|
||||
// An Index converts tags to a compact numeric value.
|
||||
//
|
||||
// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can
|
||||
// be used to store additional information about the tag.
|
||||
type Index string
|
||||
|
||||
// Elem returns the element data at the given index.
|
||||
func (s Index) Elem(x int) string {
|
||||
return string(s[x*4 : x*4+4])
|
||||
}
|
||||
|
||||
// Index reports the index of the given key or -1 if it could not be found.
|
||||
// Only the first len(key) bytes from the start of the 4-byte entries will be
|
||||
// considered for the search and the first match in Index will be returned.
|
||||
func (s Index) Index(key []byte) int {
|
||||
n := len(key)
|
||||
// search the index of the first entry with an equal or higher value than
|
||||
// key in s.
|
||||
index := sort.Search(len(s)/4, func(i int) bool {
|
||||
return cmp(s[i*4:i*4+n], key) != -1
|
||||
})
|
||||
i := index * 4
|
||||
if cmp(s[i:i+len(key)], key) != 0 {
|
||||
return -1
|
||||
}
|
||||
return index
|
||||
}
|
||||
|
||||
// Next finds the next occurrence of key after index x, which must have been
|
||||
// obtained from a call to Index using the same key. It returns x+1 or -1.
|
||||
func (s Index) Next(key []byte, x int) int {
|
||||
if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 {
|
||||
return x
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// cmp returns an integer comparing a and b lexicographically.
|
||||
func cmp(a Index, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i, c := range b[:n] {
|
||||
switch {
|
||||
case a[i] > c:
|
||||
return 1
|
||||
case a[i] < c:
|
||||
return -1
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case len(a) < len(b):
|
||||
return -1
|
||||
case len(a) > len(b):
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Compare returns an integer comparing a and b lexicographically.
|
||||
func Compare(a string, b []byte) int {
|
||||
return cmp(Index(a), b)
|
||||
}
|
||||
|
||||
// FixCase reformats b to the same pattern of cases as form.
|
||||
// It returns false if string b is malformed.
|
||||
func FixCase(form string, b []byte) bool {
|
||||
if len(form) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i, c := range b {
|
||||
if form[i] <= 'Z' {
|
||||
if c >= 'a' {
|
||||
c -= 'z' - 'Z'
|
||||
}
|
||||
if c < 'A' || 'Z' < c {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if c <= 'Z' {
|
||||
c += 'z' - 'Z'
|
||||
}
|
||||
if c < 'a' || 'z' < c {
|
||||
return false
|
||||
}
|
||||
}
|
||||
b[i] = c
|
||||
}
|
||||
return true
|
||||
}
|
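A short sketch of the Index lookup contract described above. The index string is hand-built and hypothetical; real indexes are produced by the table generators. golang.org/x/text/internal/tag is an internal package, so this only compiles inside the x/text tree.

package main

import (
	"fmt"

	"golang.org/x/text/internal/tag"
)

func main() {
	// Three sorted 4-byte entries, padded with NUL bytes.
	idx := tag.Index("ar\x00\x00en\x00\x00nl\x00\x00")

	i := idx.Index([]byte("en"))
	fmt.Println(i)                         // 1
	fmt.Printf("%q\n", idx.Elem(i))        // "en\x00\x00"
	fmt.Println(idx.Next([]byte("en"), i)) // -1: no further "en" entry
}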
58
vendor/golang.org/x/text/internal/triegen/compact.go
generated
vendored
58
vendor/golang.org/x/text/internal/triegen/compact.go
generated
vendored
@ -1,58 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package triegen
|
||||
|
||||
// This file defines Compacter and its implementations.
|
||||
|
||||
import "io"
|
||||
|
||||
// A Compacter generates an alternative, more space-efficient way to store a
|
||||
// trie value block. A trie value block holds all possible values for the last
|
||||
// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block
|
||||
// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0).
|
||||
type Compacter interface {
|
||||
// Size reports whether the Compacter could encode the given block and, if
|
||||
// so, the encoded size. len(v) is always 64.
|
||||
Size(v []uint64) (sz int, ok bool)
|
||||
|
||||
// Store stores the block using the Compacter's compression method.
|
||||
// It returns a handle with which the block can be retrieved.
|
||||
// len(v) is always 64.
|
||||
Store(v []uint64) uint32
|
||||
|
||||
// Print writes the data structures associated to the given store to w.
|
||||
Print(w io.Writer) error
|
||||
|
||||
// Handler returns the name of a function that gets called during trie
|
||||
// lookup for blocks generated by the Compacter. The function should be of
|
||||
// the form func (n uint32, b byte) uint64, where n is the index returned by
|
||||
// the Compacter's Store method and b is the last byte of the UTF-8
|
||||
// encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the
|
||||
// block.
|
||||
Handler() string
|
||||
}
|
||||
|
||||
// simpleCompacter is the default Compacter used by builder. It implements a
|
||||
// normal trie block.
|
||||
type simpleCompacter builder
|
||||
|
||||
func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) {
|
||||
return blockSize * b.ValueSize, true
|
||||
}
|
||||
|
||||
func (b *simpleCompacter) Store(v []uint64) uint32 {
|
||||
h := uint32(len(b.ValueBlocks) - blockOffset)
|
||||
b.ValueBlocks = append(b.ValueBlocks, v)
|
||||
return h
|
||||
}
|
||||
|
||||
func (b *simpleCompacter) Print(io.Writer) error {
|
||||
// Structures are printed in print.go.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *simpleCompacter) Handler() string {
|
||||
panic("Handler should be special-cased for this Compacter")
|
||||
}
|
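To make the Compacter contract concrete, here is a hypothetical implementation that stores a value block whose 64 entries are all identical as a single table entry. The names uniformCompacter, uniformValues and lookupUniform are invented for this sketch; such a Compacter would be registered via triegen.Compact, and its Handler must name a func(n uint32, b byte) uint64 emitted by Print.

package compactexample

import (
	"fmt"
	"io"
)

// uniformCompacter compacts value blocks in which every entry is identical.
type uniformCompacter struct {
	values []uint64 // one stored value per handle
}

func (c *uniformCompacter) Size(v []uint64) (sz int, ok bool) {
	for _, x := range v {
		if x != v[0] {
			return 0, false // not uniform; leave it to another Compacter
		}
	}
	return 8, true // a uniform block costs one uint64 in uniformValues
}

func (c *uniformCompacter) Store(v []uint64) uint32 {
	h := uint32(len(c.values))
	c.values = append(c.values, v[0])
	return h
}

func (c *uniformCompacter) Print(w io.Writer) error {
	// Emit the table; a real implementation would also emit a
	// lookupUniform(n uint32, b byte) uint64 helper returning uniformValues[n].
	_, err := fmt.Fprintf(w, "var uniformValues = %#v\n", c.values)
	return err
}

func (c *uniformCompacter) Handler() string {
	return "lookupUniform"
}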
251
vendor/golang.org/x/text/internal/triegen/print.go
generated
vendored
251
vendor/golang.org/x/text/internal/triegen/print.go
generated
vendored
@ -1,251 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package triegen
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// print writes all the data structures as well as the code necessary to use the
|
||||
// trie to w.
|
||||
func (b *builder) print(w io.Writer) error {
|
||||
b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize
|
||||
b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize
|
||||
b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize
|
||||
b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize
|
||||
b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize
|
||||
|
||||
// If we only have one root trie, all starter blocks are at position 0 and
|
||||
// we can access the arrays directly.
|
||||
if len(b.Trie) == 1 {
|
||||
// At this point we cannot refer to the generated tables directly.
|
||||
b.ASCIIBlock = b.Name + "Values"
|
||||
b.StarterBlock = b.Name + "Index"
|
||||
} else {
|
||||
// Otherwise we need to have explicit starter indexes in the trie
|
||||
// structure.
|
||||
b.ASCIIBlock = "t.ascii"
|
||||
b.StarterBlock = "t.utf8Start"
|
||||
}
|
||||
|
||||
b.SourceType = "[]byte"
|
||||
if err := lookupGen.Execute(w, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.SourceType = "string"
|
||||
if err := lookupGen.Execute(w, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := trieGen.Execute(w, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, c := range b.Compactions {
|
||||
if err := c.c.Print(w); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func printValues(n int, values []uint64) string {
|
||||
w := &bytes.Buffer{}
|
||||
boff := n * blockSize
|
||||
fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff)
|
||||
var newline bool
|
||||
for i, v := range values {
|
||||
if i%6 == 0 {
|
||||
newline = true
|
||||
}
|
||||
if v != 0 {
|
||||
if newline {
|
||||
fmt.Fprintf(w, "\n")
|
||||
newline = false
|
||||
}
|
||||
fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v)
|
||||
}
|
||||
}
|
||||
return w.String()
|
||||
}
|
||||
|
||||
func printIndex(b *builder, nr int, n *node) string {
|
||||
w := &bytes.Buffer{}
|
||||
boff := nr * blockSize
|
||||
fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff)
|
||||
var newline bool
|
||||
for i, c := range n.children {
|
||||
if i%8 == 0 {
|
||||
newline = true
|
||||
}
|
||||
if c != nil {
|
||||
v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index)
|
||||
if v != 0 {
|
||||
if newline {
|
||||
fmt.Fprintf(w, "\n")
|
||||
newline = false
|
||||
}
|
||||
fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
return w.String()
|
||||
}
|
||||
|
||||
var (
|
||||
trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{
|
||||
"printValues": printValues,
|
||||
"printIndex": printIndex,
|
||||
"title": strings.Title,
|
||||
"dec": func(x int) int { return x - 1 },
|
||||
"psize": func(n int) string {
|
||||
return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024)
|
||||
},
|
||||
}).Parse(trieTemplate))
|
||||
lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate))
|
||||
)
|
||||
|
||||
// TODO: consider the return type of lookup. It could be uint64, even if the
|
||||
// internal value type is smaller. We will have to verify this with the
|
||||
// performance of unicode/norm, which is very sensitive to such changes.
|
||||
const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}}
|
||||
// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}.
|
||||
type {{.Name}}Trie struct { {{if $multi}}
|
||||
ascii []{{.ValueType}} // index for ASCII bytes
|
||||
utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0
|
||||
{{end}}}
|
||||
|
||||
func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}}
|
||||
h := {{.Name}}TrieHandles[i]
|
||||
return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] }
|
||||
}
|
||||
|
||||
type {{.Name}}TrieHandle struct {
|
||||
ascii, multi {{.IndexType}}
|
||||
}
|
||||
|
||||
// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes
|
||||
var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{
|
||||
{{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}}
|
||||
{{end}}}{{else}}
|
||||
return &{{.Name}}Trie{}
|
||||
}
|
||||
{{end}}
|
||||
// lookupValue determines the type of block n and looks up the value for b.
|
||||
func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} {
|
||||
switch { {{range $i, $c := .Compactions}}
|
||||
{{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}}
|
||||
n -= {{$c.Offset}}{{end}}
|
||||
return {{print $b.ValueType}}({{$c.Handler}}){{end}}
|
||||
}
|
||||
}
|
||||
|
||||
// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes
|
||||
// The third block is the zero block.
|
||||
var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} {
|
||||
{{range $i, $v := .ValueBlocks}}{{printValues $i $v}}
|
||||
{{end}}}
|
||||
|
||||
// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes
|
||||
// Block 0 is the zero block.
|
||||
var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} {
|
||||
{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}}
|
||||
{{end}}}
|
||||
`
|
||||
|
||||
// TODO: consider allowing zero-length strings after evaluating performance with
|
||||
// unicode/norm.
|
||||
const lookupTemplate = `
|
||||
// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and
|
||||
// the width in bytes of this encoding. The size will be 0 if s does not
|
||||
// hold enough bytes to complete the encoding. len(s) must be greater than 0.
|
||||
func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) {
|
||||
c0 := s[0]
|
||||
switch {
|
||||
case c0 < 0x80: // is ASCII
|
||||
return {{.ASCIIBlock}}[c0], 1
|
||||
case c0 < 0xC2:
|
||||
return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
|
||||
case c0 < 0xE0: // 2-byte UTF-8
|
||||
if len(s) < 2 {
|
||||
return 0, 0
|
||||
}
|
||||
i := {{.StarterBlock}}[c0]
|
||||
c1 := s[1]
|
||||
if c1 < 0x80 || 0xC0 <= c1 {
|
||||
return 0, 1 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
return t.lookupValue(uint32(i), c1), 2
|
||||
case c0 < 0xF0: // 3-byte UTF-8
|
||||
if len(s) < 3 {
|
||||
return 0, 0
|
||||
}
|
||||
i := {{.StarterBlock}}[c0]
|
||||
c1 := s[1]
|
||||
if c1 < 0x80 || 0xC0 <= c1 {
|
||||
return 0, 1 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
o := uint32(i)<<6 + uint32(c1)
|
||||
i = {{.Name}}Index[o]
|
||||
c2 := s[2]
|
||||
if c2 < 0x80 || 0xC0 <= c2 {
|
||||
return 0, 2 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
return t.lookupValue(uint32(i), c2), 3
|
||||
case c0 < 0xF8: // 4-byte UTF-8
|
||||
if len(s) < 4 {
|
||||
return 0, 0
|
||||
}
|
||||
i := {{.StarterBlock}}[c0]
|
||||
c1 := s[1]
|
||||
if c1 < 0x80 || 0xC0 <= c1 {
|
||||
return 0, 1 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
o := uint32(i)<<6 + uint32(c1)
|
||||
i = {{.Name}}Index[o]
|
||||
c2 := s[2]
|
||||
if c2 < 0x80 || 0xC0 <= c2 {
|
||||
return 0, 2 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
o = uint32(i)<<6 + uint32(c2)
|
||||
i = {{.Name}}Index[o]
|
||||
c3 := s[3]
|
||||
if c3 < 0x80 || 0xC0 <= c3 {
|
||||
return 0, 3 // Illegal UTF-8: not a continuation byte.
|
||||
}
|
||||
return t.lookupValue(uint32(i), c3), 4
|
||||
}
|
||||
// Illegal rune
|
||||
return 0, 1
|
||||
}
|
||||
|
||||
// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s.
|
||||
// s must start with a full and valid UTF-8 encoded rune.
|
||||
func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} {
|
||||
c0 := s[0]
|
||||
if c0 < 0x80 { // is ASCII
|
||||
return {{.ASCIIBlock}}[c0]
|
||||
}
|
||||
i := {{.StarterBlock}}[c0]
|
||||
if c0 < 0xE0 { // 2-byte UTF-8
|
||||
return t.lookupValue(uint32(i), s[1])
|
||||
}
|
||||
i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])]
|
||||
if c0 < 0xF0 { // 3-byte UTF-8
|
||||
return t.lookupValue(uint32(i), s[2])
|
||||
}
|
||||
i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])]
|
||||
if c0 < 0xF8 { // 4-byte UTF-8
|
||||
return t.lookupValue(uint32(i), s[3])
|
||||
}
|
||||
return 0
|
||||
}
|
||||
`
|
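The lookup method generated from lookupTemplate returns both the value and the width of the UTF-8 encoding it consumed, with size 0 meaning the input was truncated. The mock below mirrors that contract for ASCII-only input; mockTrie is invented for the sketch, while a generated fooTrie has the same lookup shape but decodes full UTF-8.

package main

import "fmt"

// mockTrie mimics the signature of a generated trie's lookup method.
type mockTrie struct{ ascii [128]uint8 }

func (t *mockTrie) lookup(s []byte) (v uint8, sz int) {
	if len(s) == 0 {
		return 0, 0 // not enough bytes to complete an encoding
	}
	if c := s[0]; c < 0x80 {
		return t.ascii[c], 1
	}
	return 0, 1 // non-ASCII handling elided in this mock
}

func main() {
	t := &mockTrie{}
	t.ascii['a'] = 7

	var total uint64
	for src := []byte("abc"); len(src) > 0; {
		v, sz := t.lookup(src)
		if sz == 0 {
			break // input ends in the middle of an encoding
		}
		total += uint64(v)
		src = src[sz:]
	}
	fmt.Println(total) // 7
}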
494
vendor/golang.org/x/text/internal/triegen/triegen.go
generated
vendored
494
vendor/golang.org/x/text/internal/triegen/triegen.go
generated
vendored
@ -1,494 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package triegen implements a code generator for a trie for associating
|
||||
// unsigned integer values with UTF-8 encoded runes.
|
||||
//
|
||||
// Many of the go.text packages use tries for storing per-rune information. A
|
||||
// trie is especially useful if many of the runes have the same value. If this
|
||||
// is the case, many blocks can be expected to be shared allowing for
|
||||
// information on many runes to be stored in little space.
|
||||
//
|
||||
// As most of the lookups are done directly on []byte slices, the tries use the
|
||||
// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to
|
||||
// runes and contributes a little bit to better performance. It also naturally
|
||||
// provides a fast path for ASCII.
|
||||
//
|
||||
// Space is also an issue. There are many code points defined in Unicode and as
|
||||
// a result tables can get quite large. So every byte counts. The triegen
|
||||
// package automatically chooses the smallest integer values to represent the
|
||||
// tables. Compacters allow further compression of the trie by allowing for
|
||||
// alternative representations of individual trie blocks.
|
||||
//
|
||||
// triegen allows generating multiple tries as a single structure. This is
|
||||
// useful when, for example, one wants to generate tries for several languages
|
||||
// that have a lot of values in common. Some existing libraries for
|
||||
// internationalization store all per-language data as a dynamically loadable
|
||||
// chunk. The go.text packages are designed with the assumption that the user
|
||||
// typically wants to compile in support for all supported languages, in line
|
||||
// with Go's common approach of creating a single standalone binary. The
|
||||
// multi-root trie approach can give significant storage savings in this
|
||||
// scenario.
|
||||
//
|
||||
// triegen generates both tables and code. The code is optimized to use the
|
||||
// automatically chosen data types. The following code is generated for a Trie
|
||||
// or multiple Tries named "foo":
|
||||
// - type fooTrie
|
||||
// The trie type.
|
||||
//
|
||||
// - func newFooTrie(x int) *fooTrie
|
||||
// Trie constructor, where x is the index of the trie passed to Gen.
|
||||
//
|
||||
// - func (t *fooTrie) lookup(s []byte) (v uintX, sz int)
|
||||
// The lookup method, where uintX is automatically chosen.
|
||||
//
|
||||
// - func lookupString, lookupUnsafe and lookupStringUnsafe
|
||||
// Variants of the above.
|
||||
//
|
||||
// - var fooValues and fooIndex and any tables generated by Compacters.
|
||||
// The core trie data.
|
||||
//
|
||||
// - var fooTrieHandles
|
||||
// Indexes of starter blocks in case of multiple trie roots.
|
||||
//
|
||||
// It is recommended that users test the generated trie by checking the returned
|
||||
// value for every rune. Such exhaustive tests are possible as the number of
|
||||
// runes in Unicode is limited.
|
||||
package triegen // import "golang.org/x/text/internal/triegen"
|
||||
|
||||
// TODO: Arguably, the internally optimized data types would not have to be
|
||||
// exposed in the generated API. We could also investigate not generating the
|
||||
// code, but using it through a package. We would have to investigate the impact
|
||||
// on performance of making such a change, though. For packages like unicode/norm,
|
||||
// small changes like this could tank performance.
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"log"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// builder builds a set of tries for associating values with runes. The set of
|
||||
// tries can share common index and value blocks.
|
||||
type builder struct {
|
||||
Name string
|
||||
|
||||
// ValueType is the type of the trie values looked up.
|
||||
ValueType string
|
||||
|
||||
// ValueSize is the byte size of the ValueType.
|
||||
ValueSize int
|
||||
|
||||
// IndexType is the type of trie index values used for all UTF-8 bytes of
|
||||
// a rune except the last one.
|
||||
IndexType string
|
||||
|
||||
// IndexSize is the byte size of the IndexType.
|
||||
IndexSize int
|
||||
|
||||
// SourceType is used when generating the lookup functions. If the user
|
||||
// requests StringSupport, all lookup functions will be generated for
|
||||
// string input as well.
|
||||
SourceType string
|
||||
|
||||
Trie []*Trie
|
||||
|
||||
IndexBlocks []*node
|
||||
ValueBlocks [][]uint64
|
||||
Compactions []compaction
|
||||
Checksum uint64
|
||||
|
||||
ASCIIBlock string
|
||||
StarterBlock string
|
||||
|
||||
indexBlockIdx map[uint64]int
|
||||
valueBlockIdx map[uint64]nodeIndex
|
||||
asciiBlockIdx map[uint64]int
|
||||
|
||||
// Stats are used to fill out the template.
|
||||
Stats struct {
|
||||
NValueEntries int
|
||||
NValueBytes int
|
||||
NIndexEntries int
|
||||
NIndexBytes int
|
||||
NHandleBytes int
|
||||
}
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
// A nodeIndex encodes the index of a node, which is defined by the compaction
|
||||
// which stores it and an index within the compaction. For internal nodes, the
|
||||
// compaction is always 0.
|
||||
type nodeIndex struct {
|
||||
compaction int
|
||||
index int
|
||||
}
|
||||
|
||||
// compaction keeps track of stats used for the compaction.
|
||||
type compaction struct {
|
||||
c Compacter
|
||||
blocks []*node
|
||||
maxHandle uint32
|
||||
totalSize int
|
||||
|
||||
// Used by template-based generator and thus exported.
|
||||
Cutoff uint32
|
||||
Offset uint32
|
||||
Handler string
|
||||
}
|
||||
|
||||
func (b *builder) setError(err error) {
|
||||
if b.err == nil {
|
||||
b.err = err
|
||||
}
|
||||
}
|
||||
|
||||
// An Option can be passed to Gen.
|
||||
type Option func(b *builder) error
|
||||
|
||||
// Compact configures the trie generator to use the given Compacter.
|
||||
func Compact(c Compacter) Option {
|
||||
return func(b *builder) error {
|
||||
b.Compactions = append(b.Compactions, compaction{
|
||||
c: c,
|
||||
Handler: c.Handler() + "(n, b)"})
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Gen writes Go code for a shared trie lookup structure to w for the given
|
||||
// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will
|
||||
// return the *nameTrie for tries[x]. A value can be looked up by using one of
|
||||
// the various lookup methods defined on nameTrie. It returns the table size of
|
||||
// the generated trie.
|
||||
func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) {
|
||||
// The index contains two dummy blocks, followed by the zero block. The zero
|
||||
// block is at offset 0x80, so that the offset for the zero block for
|
||||
// continuation bytes is 0.
|
||||
b := &builder{
|
||||
Name: name,
|
||||
Trie: tries,
|
||||
IndexBlocks: []*node{{}, {}, {}},
|
||||
Compactions: []compaction{{
|
||||
Handler: name + "Values[n<<6+uint32(b)]",
|
||||
}},
|
||||
// The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero
|
||||
// block.
|
||||
indexBlockIdx: map[uint64]int{0: 0},
|
||||
valueBlockIdx: map[uint64]nodeIndex{0: {}},
|
||||
asciiBlockIdx: map[uint64]int{},
|
||||
}
|
||||
b.Compactions[0].c = (*simpleCompacter)(b)
|
||||
|
||||
for _, f := range opts {
|
||||
if err := f(b); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
b.build()
|
||||
if b.err != nil {
|
||||
return 0, b.err
|
||||
}
|
||||
if err = b.print(w); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return b.Size(), nil
|
||||
}
|
||||
|
||||
// A Trie represents a single root node of a trie. A builder may build several
|
||||
// overlapping tries at once.
|
||||
type Trie struct {
|
||||
root *node
|
||||
|
||||
hiddenTrie
|
||||
}
|
||||
|
||||
// hiddenTrie contains values we want to be visible to the template generator,
|
||||
// but hidden from the API documentation.
|
||||
type hiddenTrie struct {
|
||||
Name string
|
||||
Checksum uint64
|
||||
ASCIIIndex int
|
||||
StarterIndex int
|
||||
}
|
||||
|
||||
// NewTrie returns a new trie root.
|
||||
func NewTrie(name string) *Trie {
|
||||
return &Trie{
|
||||
&node{
|
||||
children: make([]*node, blockSize),
|
||||
values: make([]uint64, utf8.RuneSelf),
|
||||
},
|
||||
hiddenTrie{Name: name},
|
||||
}
|
||||
}
|
||||
|
||||
// Gen is a convenience wrapper around the Gen func passing t as the only trie
|
||||
// and uses the name passed to NewTrie. It returns the size of the generated
|
||||
// tables.
|
||||
func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) {
|
||||
return Gen(w, t.Name, []*Trie{t}, opts...)
|
||||
}
|
||||
|
||||
// node is a node of the intermediate trie structure.
|
||||
type node struct {
|
||||
// children holds this node's children. It is always of length 64.
|
||||
// A child node may be nil.
|
||||
children []*node
|
||||
|
||||
// values contains the values of this node. If it is non-nil, this node is
|
||||
// either a root or leaf node:
|
||||
// For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F].
|
||||
// For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF].
|
||||
values []uint64
|
||||
|
||||
index nodeIndex
|
||||
}
|
||||
|
||||
// Insert associates value with the given rune. Insert will panic if a non-zero
|
||||
// value is passed for an invalid rune.
|
||||
func (t *Trie) Insert(r rune, value uint64) {
|
||||
if value == 0 {
|
||||
return
|
||||
}
|
||||
s := string(r)
|
||||
if []rune(s)[0] != r && value != 0 {
|
||||
// Note: The UCD tables will always assign what amounts to a zero value
|
||||
// to a surrogate. Allowing a zero value for an illegal rune allows
|
||||
// users to iterate over [0..MaxRune] without having to explicitly
|
||||
// exclude surrogates, which would be tedious.
|
||||
panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r))
|
||||
}
|
||||
if len(s) == 1 {
|
||||
// It is a root node value (ASCII).
|
||||
t.root.values[s[0]] = value
|
||||
return
|
||||
}
|
||||
|
||||
n := t.root
|
||||
for ; len(s) > 1; s = s[1:] {
|
||||
if n.children == nil {
|
||||
n.children = make([]*node, blockSize)
|
||||
}
|
||||
p := s[0] % blockSize
|
||||
c := n.children[p]
|
||||
if c == nil {
|
||||
c = &node{}
|
||||
n.children[p] = c
|
||||
}
|
||||
if len(s) > 2 && c.values != nil {
|
||||
log.Fatalf("triegen: insert(%U): found internal node with values", r)
|
||||
}
|
||||
n = c
|
||||
}
|
||||
if n.values == nil {
|
||||
n.values = make([]uint64, blockSize)
|
||||
}
|
||||
if n.children != nil {
|
||||
log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r)
|
||||
}
|
||||
n.values[s[0]-0x80] = value
|
||||
}
|
||||
|
||||
// Size returns the number of bytes the generated trie will take to store. It
|
||||
// needs to be exported as it is used in the templates.
|
||||
func (b *builder) Size() int {
|
||||
// Index blocks.
|
||||
sz := len(b.IndexBlocks) * blockSize * b.IndexSize
|
||||
|
||||
// Skip the first compaction, which represents the normal value blocks, as
|
||||
// its totalSize does not account for the ASCII blocks, which are managed
|
||||
// separately.
|
||||
sz += len(b.ValueBlocks) * blockSize * b.ValueSize
|
||||
for _, c := range b.Compactions[1:] {
|
||||
sz += c.totalSize
|
||||
}
|
||||
|
||||
// TODO: this computation does not account for the fixed overhead of using
|
||||
// a compaction, either code or data. As for data, though, the typical
|
||||
// overhead of data is in the order of bytes (2 bytes for cases). Further,
|
||||
// the savings of using a compaction should anyway be substantial for it to
|
||||
// be worth it.
|
||||
|
||||
// For multi-root tries, we also need to account for the handles.
|
||||
if len(b.Trie) > 1 {
|
||||
sz += 2 * b.IndexSize * len(b.Trie)
|
||||
}
|
||||
return sz
|
||||
}
|
||||
|
||||
func (b *builder) build() {
|
||||
// Compute the sizes of the values.
|
||||
var vmax uint64
|
||||
for _, t := range b.Trie {
|
||||
vmax = maxValue(t.root, vmax)
|
||||
}
|
||||
b.ValueType, b.ValueSize = getIntType(vmax)
|
||||
|
||||
// Compute all block allocations.
|
||||
// TODO: first compute the ASCII blocks for all tries and then the other
|
||||
// nodes. ASCII blocks are more restricted in placement, as they require two
|
||||
// blocks to be placed consecutively. Processing them first may improve
|
||||
// sharing (at least one zero block can be expected to be saved.)
|
||||
for _, t := range b.Trie {
|
||||
b.Checksum += b.buildTrie(t)
|
||||
}
|
||||
|
||||
// Compute the offsets for all the Compacters.
|
||||
offset := uint32(0)
|
||||
for i := range b.Compactions {
|
||||
c := &b.Compactions[i]
|
||||
c.Offset = offset
|
||||
offset += c.maxHandle + 1
|
||||
c.Cutoff = offset
|
||||
}
|
||||
|
||||
// Compute the sizes of indexes.
|
||||
// TODO: different byte positions could have different sizes. So far we have
|
||||
// not found a case where this is beneficial.
|
||||
imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff)
|
||||
for _, ib := range b.IndexBlocks {
|
||||
if x := uint64(ib.index.index); x > imax {
|
||||
imax = x
|
||||
}
|
||||
}
|
||||
b.IndexType, b.IndexSize = getIntType(imax)
|
||||
}
|
||||
|
||||
func maxValue(n *node, max uint64) uint64 {
|
||||
if n == nil {
|
||||
return max
|
||||
}
|
||||
for _, c := range n.children {
|
||||
max = maxValue(c, max)
|
||||
}
|
||||
for _, v := range n.values {
|
||||
if max < v {
|
||||
max = v
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
func getIntType(v uint64) (string, int) {
|
||||
switch {
|
||||
case v < 1<<8:
|
||||
return "uint8", 1
|
||||
case v < 1<<16:
|
||||
return "uint16", 2
|
||||
case v < 1<<32:
|
||||
return "uint32", 4
|
||||
}
|
||||
return "uint64", 8
|
||||
}
|
||||
|
||||
const (
|
||||
blockSize = 64
|
||||
|
||||
// Subtract two blocks to offset 0x80, the first continuation byte.
|
||||
blockOffset = 2
|
||||
|
||||
// Subtract three blocks to offset 0xC0, the first non-ASCII starter.
|
||||
rootBlockOffset = 3
|
||||
)
|
||||
|
||||
var crcTable = crc64.MakeTable(crc64.ISO)
|
||||
|
||||
func (b *builder) buildTrie(t *Trie) uint64 {
|
||||
n := t.root
|
||||
|
||||
// Get the ASCII offset. For the first trie, the ASCII block will be at
|
||||
// position 0.
|
||||
hasher := crc64.New(crcTable)
|
||||
binary.Write(hasher, binary.BigEndian, n.values)
|
||||
hash := hasher.Sum64()
|
||||
|
||||
v, ok := b.asciiBlockIdx[hash]
|
||||
if !ok {
|
||||
v = len(b.ValueBlocks)
|
||||
b.asciiBlockIdx[hash] = v
|
||||
|
||||
b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:])
|
||||
if v == 0 {
|
||||
// Add the zero block at position 2 so that it will be assigned a
|
||||
// zero reference in the lookup blocks.
|
||||
// TODO: always do this? This would allow us to remove a check from
|
||||
// the trie lookup, but at the expense of extra space. Analyze
|
||||
// performance for unicode/norm.
|
||||
b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize))
|
||||
}
|
||||
}
|
||||
t.ASCIIIndex = v
|
||||
|
||||
// Compute remaining offsets.
|
||||
t.Checksum = b.computeOffsets(n, true)
|
||||
// We already subtracted the normal blockOffset from the index. Subtract the
|
||||
// difference for starter bytes.
|
||||
t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset)
|
||||
return t.Checksum
|
||||
}
|
||||
|
||||
func (b *builder) computeOffsets(n *node, root bool) uint64 {
|
||||
// For the first trie, the root lookup block will be at position 3, which is
|
||||
// the offset for UTF-8 non-ASCII starter bytes.
|
||||
first := len(b.IndexBlocks) == rootBlockOffset
|
||||
if first {
|
||||
b.IndexBlocks = append(b.IndexBlocks, n)
|
||||
}
|
||||
|
||||
// We special-case the cases where all values recursively are 0. This allows
|
||||
// for the use of a zero block to which all such values can be directed.
|
||||
hash := uint64(0)
|
||||
if n.children != nil || n.values != nil {
|
||||
hasher := crc64.New(crcTable)
|
||||
for _, c := range n.children {
|
||||
var v uint64
|
||||
if c != nil {
|
||||
v = b.computeOffsets(c, false)
|
||||
}
|
||||
binary.Write(hasher, binary.BigEndian, v)
|
||||
}
|
||||
binary.Write(hasher, binary.BigEndian, n.values)
|
||||
hash = hasher.Sum64()
|
||||
}
|
||||
|
||||
if first {
|
||||
b.indexBlockIdx[hash] = rootBlockOffset - blockOffset
|
||||
}
|
||||
|
||||
// Compacters don't apply to internal nodes.
|
||||
if n.children != nil {
|
||||
v, ok := b.indexBlockIdx[hash]
|
||||
if !ok {
|
||||
v = len(b.IndexBlocks) - blockOffset
|
||||
b.IndexBlocks = append(b.IndexBlocks, n)
|
||||
b.indexBlockIdx[hash] = v
|
||||
}
|
||||
n.index = nodeIndex{0, v}
|
||||
} else {
|
||||
h, ok := b.valueBlockIdx[hash]
|
||||
if !ok {
|
||||
bestI, bestSize := 0, blockSize*b.ValueSize
|
||||
for i, c := range b.Compactions[1:] {
|
||||
if sz, ok := c.c.Size(n.values); ok && bestSize > sz {
|
||||
bestI, bestSize = i+1, sz
|
||||
}
|
||||
}
|
||||
c := &b.Compactions[bestI]
|
||||
c.totalSize += bestSize
|
||||
v := c.c.Store(n.values)
|
||||
if c.maxHandle < v {
|
||||
c.maxHandle = v
|
||||
}
|
||||
h = nodeIndex{bestI, int(v)}
|
||||
b.valueBlockIdx[hash] = h
|
||||
}
|
||||
n.index = h
|
||||
}
|
||||
return hash
|
||||
}
|
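Putting the pieces above together, a generator typically builds a trie with NewTrie and Insert and then calls Gen (or the Trie.Gen convenience wrapper) to emit the tables. triegen is an internal package, so the sketch below only compiles within the x/text tree; the trie name and inserted values are arbitrary.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/text/internal/triegen"
)

func main() {
	t := triegen.NewTrie("example")
	// Associate a non-zero value with the uppercase ASCII letters and one
	// multi-byte rune; zero values need not be inserted.
	for r := rune('A'); r <= 'Z'; r++ {
		t.Insert(r, uint64(r-'A'+1))
	}
	t.Insert('é', 100)

	var buf bytes.Buffer
	sz, err := t.Gen(&buf) // writes exampleTrie, exampleValues, exampleIndex, ...
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("generated tables take %d bytes\n", sz)
}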
371
vendor/golang.org/x/text/internal/ucd/ucd.go
generated
vendored
371
vendor/golang.org/x/text/internal/ucd/ucd.go
generated
vendored
@ -1,371 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ucd provides a parser for Unicode Character Database files, the
|
||||
// format of which is defined in http://www.unicode.org/reports/tr44/. See
|
||||
// http://www.unicode.org/Public/UCD/latest/ucd/ for example files.
|
||||
//
|
||||
// It currently does not support substitutions of missing fields.
|
||||
package ucd // import "golang.org/x/text/internal/ucd"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// UnicodeData.txt fields.
|
||||
const (
|
||||
CodePoint = iota
|
||||
Name
|
||||
GeneralCategory
|
||||
CanonicalCombiningClass
|
||||
BidiClass
|
||||
DecompMapping
|
||||
DecimalValue
|
||||
DigitValue
|
||||
NumericValue
|
||||
BidiMirrored
|
||||
Unicode1Name
|
||||
ISOComment
|
||||
SimpleUppercaseMapping
|
||||
SimpleLowercaseMapping
|
||||
SimpleTitlecaseMapping
|
||||
)
|
||||
|
||||
// Parse calls f for each entry in the given reader of a UCD file. It will close
|
||||
// the reader upon return. It will call log.Fatal if any error occurred.
|
||||
//
|
||||
// This implements the most common usage pattern of using Parser.
|
||||
func Parse(r io.ReadCloser, f func(p *Parser)) {
|
||||
defer r.Close()
|
||||
|
||||
p := New(r)
|
||||
for p.Next() {
|
||||
f(p)
|
||||
}
|
||||
if err := p.Err(); err != nil {
|
||||
r.Close() // os.Exit will cause defers not to be called.
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// An Option is used to configure a Parser.
|
||||
type Option func(p *Parser)
|
||||
|
||||
func keepRanges(p *Parser) {
|
||||
p.keepRanges = true
|
||||
}
|
||||
|
||||
var (
|
||||
// KeepRanges prevents the expansion of ranges. The raw ranges can be
|
||||
// obtained by calling Range(0) on the parser.
|
||||
KeepRanges Option = keepRanges
|
||||
)
|
||||
|
||||
// The Part option registers a handler for lines starting with a '@'. The text
|
||||
// after a '@' is available as the first field. Comments are handled as usual.
|
||||
func Part(f func(p *Parser)) Option {
|
||||
return func(p *Parser) {
|
||||
p.partHandler = f
|
||||
}
|
||||
}
|
||||
|
||||
// The CommentHandler option passes comments that are on a line of their own to
|
||||
// a given handler.
|
||||
func CommentHandler(f func(s string)) Option {
|
||||
return func(p *Parser) {
|
||||
p.commentHandler = f
|
||||
}
|
||||
}
|
||||
|
||||
// A Parser parses Unicode Character Database (UCD) files.
|
||||
type Parser struct {
|
||||
scanner *bufio.Scanner
|
||||
|
||||
keepRanges bool // Don't expand rune ranges in field 0.
|
||||
|
||||
err error
|
||||
comment string
|
||||
field []string
|
||||
// parsedRange is needed in case Range(0) is called more than once for one
|
||||
// field. In some cases this requires scanning ahead.
|
||||
line int
|
||||
parsedRange bool
|
||||
rangeStart, rangeEnd rune
|
||||
|
||||
partHandler func(p *Parser)
|
||||
commentHandler func(s string)
|
||||
}
|
||||
|
||||
func (p *Parser) setError(err error, msg string) {
|
||||
if p.err == nil && err != nil {
|
||||
if msg == "" {
|
||||
p.err = fmt.Errorf("ucd:line:%d: %v", p.line, err)
|
||||
} else {
|
||||
p.err = fmt.Errorf("ucd:line:%d:%s: %v", p.line, msg, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Parser) getField(i int) string {
|
||||
if i >= len(p.field) {
|
||||
return ""
|
||||
}
|
||||
return p.field[i]
|
||||
}
|
||||
|
||||
// Err returns a non-nil error if any error occurred during parsing.
|
||||
func (p *Parser) Err() error {
|
||||
return p.err
|
||||
}
|
||||
|
||||
// New returns a Parser for the given Reader.
|
||||
func New(r io.Reader, o ...Option) *Parser {
|
||||
p := &Parser{
|
||||
scanner: bufio.NewScanner(r),
|
||||
}
|
||||
for _, f := range o {
|
||||
f(p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// Next parses the next line in the file. It returns true if a line was parsed
|
||||
// and false if it reached the end of the file.
|
||||
func (p *Parser) Next() bool {
|
||||
if !p.keepRanges && p.rangeStart < p.rangeEnd {
|
||||
p.rangeStart++
|
||||
return true
|
||||
}
|
||||
p.comment = ""
|
||||
p.field = p.field[:0]
|
||||
p.parsedRange = false
|
||||
|
||||
for p.scanner.Scan() && p.err == nil {
|
||||
p.line++
|
||||
s := p.scanner.Text()
|
||||
if s == "" {
|
||||
continue
|
||||
}
|
||||
if s[0] == '#' {
|
||||
if p.commentHandler != nil {
|
||||
p.commentHandler(strings.TrimSpace(s[1:]))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse line
|
||||
if i := strings.IndexByte(s, '#'); i != -1 {
|
||||
p.comment = strings.TrimSpace(s[i+1:])
|
||||
s = s[:i]
|
||||
}
|
||||
if s[0] == '@' {
|
||||
if p.partHandler != nil {
|
||||
p.field = append(p.field, strings.TrimSpace(s[1:]))
|
||||
p.partHandler(p)
|
||||
p.field = p.field[:0]
|
||||
}
|
||||
p.comment = ""
|
||||
continue
|
||||
}
|
||||
for {
|
||||
i := strings.IndexByte(s, ';')
|
||||
if i == -1 {
|
||||
p.field = append(p.field, strings.TrimSpace(s))
|
||||
break
|
||||
}
|
||||
p.field = append(p.field, strings.TrimSpace(s[:i]))
|
||||
s = s[i+1:]
|
||||
}
|
||||
if !p.keepRanges {
|
||||
p.rangeStart, p.rangeEnd = p.getRange(0)
|
||||
}
|
||||
return true
|
||||
}
|
||||
p.setError(p.scanner.Err(), "scanner failed")
|
||||
return false
|
||||
}
|
||||
|
||||
func parseRune(b string) (rune, error) {
|
||||
if len(b) > 2 && b[0] == 'U' && b[1] == '+' {
|
||||
b = b[2:]
|
||||
}
|
||||
x, err := strconv.ParseUint(b, 16, 32)
|
||||
return rune(x), err
|
||||
}
|
||||
|
||||
func (p *Parser) parseRune(s string) rune {
|
||||
x, err := parseRune(s)
|
||||
p.setError(err, "failed to parse rune")
|
||||
return x
|
||||
}
|
||||
|
||||
// Rune parses and returns field i as a rune.
|
||||
func (p *Parser) Rune(i int) rune {
|
||||
if i > 0 || p.keepRanges {
|
||||
return p.parseRune(p.getField(i))
|
||||
}
|
||||
return p.rangeStart
|
||||
}
|
||||
|
||||
// Runes interprets and returns field i as a sequence of runes.
|
||||
func (p *Parser) Runes(i int) (runes []rune) {
|
||||
add := func(s string) {
|
||||
if s = strings.TrimSpace(s); len(s) > 0 {
|
||||
runes = append(runes, p.parseRune(s))
|
||||
}
|
||||
}
|
||||
for b := p.getField(i); ; {
|
||||
i := strings.IndexByte(b, ' ')
|
||||
if i == -1 {
|
||||
add(b)
|
||||
break
|
||||
}
|
||||
add(b[:i])
|
||||
b = b[i+1:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>")
|
||||
|
||||
// reRange matches one line of a legacy rune range.
|
||||
reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$")
|
||||
)
|
||||
|
||||
// Range parses and returns field i as a rune range. A range is inclusive at
|
||||
// both ends. If the field only has one rune, first and last will be identical.
|
||||
// It supports the legacy format for ranges used in UnicodeData.txt.
|
||||
func (p *Parser) Range(i int) (first, last rune) {
|
||||
if !p.keepRanges {
|
||||
return p.rangeStart, p.rangeStart
|
||||
}
|
||||
return p.getRange(i)
|
||||
}
|
||||
|
||||
func (p *Parser) getRange(i int) (first, last rune) {
|
||||
b := p.getField(i)
|
||||
if k := strings.Index(b, ".."); k != -1 {
|
||||
return p.parseRune(b[:k]), p.parseRune(b[k+2:])
|
||||
}
|
||||
// The first field may not be a rune, in which case we may ignore any error
|
||||
// and set the range as 0..0.
|
||||
x, err := parseRune(b)
|
||||
if err != nil {
|
||||
// Disable range parsing henceforth. This ensures that an error will be
|
||||
// returned if the user subsequently will try to parse this field as
|
||||
// a Rune.
|
||||
p.keepRanges = true
|
||||
}
|
||||
// Special case for UnicodeData that was retained for backwards compatibility.
|
||||
if i == 0 && len(p.field) > 1 && strings.HasSuffix(p.field[1], "First>") {
|
||||
if p.parsedRange {
|
||||
return p.rangeStart, p.rangeEnd
|
||||
}
|
||||
mf := reRange.FindStringSubmatch(p.scanner.Text())
|
||||
p.line++
|
||||
if mf == nil || !p.scanner.Scan() {
|
||||
p.setError(errIncorrectLegacyRange, "")
|
||||
return x, x
|
||||
}
|
||||
// Using Bytes would be more efficient here, but Text is a lot easier
|
||||
// and this is not a frequent case.
|
||||
ml := reRange.FindStringSubmatch(p.scanner.Text())
|
||||
if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] {
|
||||
p.setError(errIncorrectLegacyRange, "")
|
||||
return x, x
|
||||
}
|
||||
p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Text()[:len(ml[1])])
|
||||
p.parsedRange = true
|
||||
return p.rangeStart, p.rangeEnd
|
||||
}
|
||||
return x, x
|
||||
}
|
||||
|
||||
// bools recognizes all valid UCD boolean values.
|
||||
var bools = map[string]bool{
|
||||
"": false,
|
||||
"N": false,
|
||||
"No": false,
|
||||
"F": false,
|
||||
"False": false,
|
||||
"Y": true,
|
||||
"Yes": true,
|
||||
"T": true,
|
||||
"True": true,
|
||||
}
|
||||
|
||||
// Bool parses and returns field i as a boolean value.
|
||||
func (p *Parser) Bool(i int) bool {
|
||||
f := p.getField(i)
|
||||
for s, v := range bools {
|
||||
if f == s {
|
||||
return v
|
||||
}
|
||||
}
|
||||
p.setError(strconv.ErrSyntax, "error parsing bool")
|
||||
return false
|
||||
}
|
||||
|
||||
// Int parses and returns field i as an integer value.
|
||||
func (p *Parser) Int(i int) int {
|
||||
x, err := strconv.ParseInt(string(p.getField(i)), 10, 64)
|
||||
p.setError(err, "error parsing int")
|
||||
return int(x)
|
||||
}
|
||||
|
||||
// Uint parses and returns field i as an unsigned integer value.
|
||||
func (p *Parser) Uint(i int) uint {
|
||||
x, err := strconv.ParseUint(string(p.getField(i)), 10, 64)
|
||||
p.setError(err, "error parsing uint")
|
||||
return uint(x)
|
||||
}
|
||||
|
||||
// Float parses and returns field i as a decimal value.
|
||||
func (p *Parser) Float(i int) float64 {
|
||||
x, err := strconv.ParseFloat(string(p.getField(i)), 64)
|
||||
p.setError(err, "error parsing float")
|
||||
return x
|
||||
}
|
||||
|
||||
// String parses and returns field i as a string value.
|
||||
func (p *Parser) String(i int) string {
|
||||
return string(p.getField(i))
|
||||
}
|
||||
|
||||
// Strings parses and returns field i as a space-separated list of strings.
|
||||
func (p *Parser) Strings(i int) []string {
|
||||
ss := strings.Split(string(p.getField(i)), " ")
|
||||
for i, s := range ss {
|
||||
ss[i] = strings.TrimSpace(s)
|
||||
}
|
||||
return ss
|
||||
}
|
||||
|
||||
// Comment returns the comments for the current line.
|
||||
func (p *Parser) Comment() string {
|
||||
return string(p.comment)
|
||||
}
|
||||
|
||||
var errUndefinedEnum = errors.New("ucd: undefined enum value")
|
||||
|
||||
// Enum interprets and returns field i as a value that must be one of the values
|
||||
// in enum.
|
||||
func (p *Parser) Enum(i int, enum ...string) string {
|
||||
f := p.getField(i)
|
||||
for _, s := range enum {
|
||||
if f == s {
|
||||
return s
|
||||
}
|
||||
}
|
||||
p.setError(errUndefinedEnum, "error parsing enum")
|
||||
return ""
|
||||
}
|
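As the Parse doc comment above notes, Parse is the common way to drive the Parser. The sketch below feeds it two made-up lines in the UnicodeData.txt layout instead of a real UCD file; ucd is an internal package, so the import only resolves within the x/text tree.

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"golang.org/x/text/internal/ucd"
)

func main() {
	// Sample lines in the semicolon-separated UnicodeData.txt layout.
	data := "0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;\n" +
		"0061;LATIN SMALL LETTER A;Ll;0;L;;;;;N;;;0041;;0041\n"

	ucd.Parse(ioutil.NopCloser(strings.NewReader(data)), func(p *ucd.Parser) {
		fmt.Printf("%U %s (%s)\n",
			p.Rune(ucd.CodePoint), p.String(ucd.Name), p.String(ucd.GeneralCategory))
	})
}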
16
vendor/golang.org/x/text/language/Makefile
generated
vendored
16
vendor/golang.org/x/text/language/Makefile
generated
vendored
@ -1,16 +0,0 @@
|
||||
# Copyright 2013 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
CLEANFILES+=maketables
|
||||
|
||||
maketables: maketables.go
|
||||
go build $^
|
||||
|
||||
tables: maketables
|
||||
./maketables > tables.go
|
||||
gofmt -w -s tables.go
|
||||
|
||||
# Build (but do not run) maketables during testing,
|
||||
# just to make sure it still compiles.
|
||||
testshort: maketables
|
16
vendor/golang.org/x/text/language/common.go
generated
vendored
16
vendor/golang.org/x/text/language/common.go
generated
vendored
@ -1,16 +0,0 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
package language
|
||||
|
||||
// This file contains code common to the maketables.go and the package code.
|
||||
|
||||
// langAliasType is the type of an alias in langAliasMap.
|
||||
type langAliasType int8
|
||||
|
||||
const (
|
||||
langDeprecated langAliasType = iota
|
||||
langMacro
|
||||
langLegacy
|
||||
|
||||
langAliasTypeUnknown langAliasType = -1
|
||||
)
|
197
vendor/golang.org/x/text/language/coverage.go
generated
vendored
197
vendor/golang.org/x/text/language/coverage.go
generated
vendored
@ -1,197 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// The Coverage interface is used to define the level of coverage of an
|
||||
// internationalization service. Note that not all types are supported by all
|
||||
// services. As lists may be generated on the fly, it is recommended that users
|
||||
// of a Coverage cache the results.
|
||||
type Coverage interface {
|
||||
// Tags returns the list of supported tags.
|
||||
Tags() []Tag
|
||||
|
||||
// BaseLanguages returns the list of supported base languages.
|
||||
BaseLanguages() []Base
|
||||
|
||||
// Scripts returns the list of supported scripts.
|
||||
Scripts() []Script
|
||||
|
||||
// Regions returns the list of supported regions.
|
||||
Regions() []Region
|
||||
}
|
||||
|
||||
var (
|
||||
// Supported defines a Coverage that lists all supported subtags. Tags
|
||||
// always returns nil.
|
||||
Supported Coverage = allSubtags{}
|
||||
)
|
||||
|
||||
// TODO:
|
||||
// - Support Variants, numbering systems.
|
||||
// - CLDR coverage levels.
|
||||
// - Set of common tags defined in this package.
|
||||
|
||||
type allSubtags struct{}
|
||||
|
||||
// Regions returns the list of supported regions. As all regions are in a
|
||||
// consecutive range, it simply returns a slice of numbers in increasing order.
|
||||
// The "undefined" region is not returned.
|
||||
func (s allSubtags) Regions() []Region {
|
||||
reg := make([]Region, numRegions)
|
||||
for i := range reg {
|
||||
reg[i] = Region{regionID(i + 1)}
|
||||
}
|
||||
return reg
|
||||
}
|
||||
|
||||
// Scripts returns the list of supported scripts. As all scripts are in a
|
||||
// consecutive range, it simply returns a slice of numbers in increasing order.
|
||||
// The "undefined" script is not returned.
|
||||
func (s allSubtags) Scripts() []Script {
|
||||
scr := make([]Script, numScripts)
|
||||
for i := range scr {
|
||||
scr[i] = Script{scriptID(i + 1)}
|
||||
}
|
||||
return scr
|
||||
}
|
||||
|
||||
// BaseLanguages returns the list of all supported base languages. It generates
|
||||
// the list by traversing the internal structures.
|
||||
func (s allSubtags) BaseLanguages() []Base {
|
||||
base := make([]Base, 0, numLanguages)
|
||||
for i := 0; i < langNoIndexOffset; i++ {
|
||||
// We included "und" already for the value 0.
|
||||
if i != nonCanonicalUnd {
|
||||
base = append(base, Base{langID(i)})
|
||||
}
|
||||
}
|
||||
i := langNoIndexOffset
|
||||
for _, v := range langNoIndex {
|
||||
for k := 0; k < 8; k++ {
|
||||
if v&1 == 1 {
|
||||
base = append(base, Base{langID(i)})
|
||||
}
|
||||
v >>= 1
|
||||
i++
|
||||
}
|
||||
}
|
||||
return base
|
||||
}
|
||||
|
||||
// Tags always returns nil.
|
||||
func (s allSubtags) Tags() []Tag {
|
||||
return nil
|
||||
}
|
||||
|
||||
// coverage is used by NewCoverage, which is a convenient way for
|
||||
// creating Coverage implementations for partially defined data. Very often a
|
||||
// package will only need to define a subset of slices. coverage provides a
|
||||
// convenient way to do this. Moreover, packages using NewCoverage, instead of
|
||||
// their own implementation, will not break if later new slice types are added.
|
||||
type coverage struct {
|
||||
tags func() []Tag
|
||||
bases func() []Base
|
||||
scripts func() []Script
|
||||
regions func() []Region
|
||||
}
|
||||
|
||||
func (s *coverage) Tags() []Tag {
|
||||
if s.tags == nil {
|
||||
return nil
|
||||
}
|
||||
return s.tags()
|
||||
}
|
||||
|
||||
// bases implements sort.Interface and is used to sort base languages.
|
||||
type bases []Base
|
||||
|
||||
func (b bases) Len() int {
|
||||
return len(b)
|
||||
}
|
||||
|
||||
func (b bases) Swap(i, j int) {
|
||||
b[i], b[j] = b[j], b[i]
|
||||
}
|
||||
|
||||
func (b bases) Less(i, j int) bool {
|
||||
return b[i].langID < b[j].langID
|
||||
}
|
||||
|
||||
// BaseLanguages returns the result from calling s.bases if it is specified or
|
||||
// otherwise derives the set of supported base languages from tags.
|
||||
func (s *coverage) BaseLanguages() []Base {
|
||||
if s.bases == nil {
|
||||
tags := s.Tags()
|
||||
if len(tags) == 0 {
|
||||
return nil
|
||||
}
|
||||
a := make([]Base, len(tags))
|
||||
for i, t := range tags {
|
||||
a[i] = Base{langID(t.lang)}
|
||||
}
|
||||
sort.Sort(bases(a))
|
||||
k := 0
|
||||
for i := 1; i < len(a); i++ {
|
||||
if a[k] != a[i] {
|
||||
k++
|
||||
a[k] = a[i]
|
||||
}
|
||||
}
|
||||
return a[:k+1]
|
||||
}
|
||||
return s.bases()
|
||||
}
|
||||
|
||||
func (s *coverage) Scripts() []Script {
|
||||
if s.scripts == nil {
|
||||
return nil
|
||||
}
|
||||
return s.scripts()
|
||||
}
|
||||
|
||||
func (s *coverage) Regions() []Region {
|
||||
if s.regions == nil {
|
||||
return nil
|
||||
}
|
||||
return s.regions()
|
||||
}
|
||||
|
||||
// NewCoverage returns a Coverage for the given lists. It is typically used by
|
||||
// packages providing internationalization services to define their level of
|
||||
// coverage. A list may be of type []T or func() []T, where T is either Tag,
|
||||
// Base, Script or Region. The returned Coverage derives the value for Bases
|
||||
// from Tags if no func or slice for []Base is specified. For other unspecified
|
||||
// types the returned Coverage will return nil for the respective methods.
|
||||
func NewCoverage(list ...interface{}) Coverage {
|
||||
s := &coverage{}
|
||||
for _, x := range list {
|
||||
switch v := x.(type) {
|
||||
case func() []Base:
|
||||
s.bases = v
|
||||
case func() []Script:
|
||||
s.scripts = v
|
||||
case func() []Region:
|
||||
s.regions = v
|
||||
case func() []Tag:
|
||||
s.tags = v
|
||||
case []Base:
|
||||
s.bases = func() []Base { return v }
|
||||
case []Script:
|
||||
s.scripts = func() []Script { return v }
|
||||
case []Region:
|
||||
s.regions = func() []Region { return v }
|
||||
case []Tag:
|
||||
s.tags = func() []Tag { return v }
|
||||
default:
|
||||
panic(fmt.Sprintf("language: unsupported set type %T", v))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
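A small usage sketch for NewCoverage as described above: supplying only a tag list lets BaseLanguages be derived from the tags, while the unspecified Scripts and Regions return nil. The tag choice is arbitrary.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	c := language.NewCoverage([]language.Tag{
		language.English,
		language.Danish,
		language.Chinese,
	})
	fmt.Println(c.BaseLanguages())  // base languages derived from the tags
	fmt.Println(c.Scripts() == nil) // true: no script list was supplied
}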
102
vendor/golang.org/x/text/language/doc.go
generated
vendored
102
vendor/golang.org/x/text/language/doc.go
generated
vendored
@ -1,102 +0,0 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package language implements BCP 47 language tags and related functionality.
|
||||
//
|
||||
// The most important function of package language is to match a list of
|
||||
// user-preferred languages to a list of supported languages.
|
||||
// It relieves the developer of dealing with the complexity of this process
|
||||
// and provides the user with the best experience
|
||||
// (see https://blog.golang.org/matchlang).
|
||||
//
|
||||
//
|
||||
// Matching preferred against supported languages
|
||||
//
|
||||
// A Matcher for an application that supports English, Australian English,
|
||||
// Danish, and standard Mandarin can be created as follows:
|
||||
//
|
||||
// var matcher = language.NewMatcher([]language.Tag{
|
||||
// language.English, // The first language is used as fallback.
|
||||
// language.MustParse("en-AU"),
|
||||
// language.Danish,
|
||||
// language.Chinese,
|
||||
// })
|
||||
//
|
||||
// This list of supported languages is typically implied by the languages for
|
||||
// which there exists translations of the user interface.
|
||||
//
|
||||
// User-preferred languages usually come as a comma-separated list of BCP 47
|
||||
// language tags.
|
||||
// The MatchString finds best matches for such strings:
|
||||
//
|
||||
// handler(w http.ResponseWriter, r *http.Request) {
|
||||
// lang, _ := r.Cookie("lang")
|
||||
// accept := r.Header.Get("Accept-Language")
|
||||
// tag, _ := language.MatchStrings(matcher, lang.String(), accept)
|
||||
//
|
||||
// // tag should now be used for the initialization of any
|
||||
// // locale-specific service.
|
||||
// }
|
||||
//
|
||||
// The Matcher's Match method can be used to match Tags directly.
|
||||
//
|
||||
// Matchers are aware of the intricacies of equivalence between languages, such
|
||||
// as deprecated subtags, legacy tags, macro languages, mutual
|
||||
// intelligibility between scripts and languages, and transparently passing
|
||||
// BCP 47 user configuration.
|
||||
// For instance, it will know that a reader of Bokmål Danish can read Norwegian
|
||||
// and will know that Cantonese ("yue") is a good match for "zh-HK".
|
||||
//
|
||||
//
|
||||
// Using match results
|
||||
//
|
||||
// To guarantee a consistent experience for the user it is important to
|
||||
// use the same language tag for the selection of any locale-specific services.
|
||||
// For example, it is utterly confusing to substitute spelled-out numbers
|
||||
// or dates in one language in text of another language.
|
||||
// More subtly confusing is using the wrong sorting order or casing
|
||||
// algorithm for a certain language.
|
||||
//
|
||||
// All the packages in x/text that provide locale-specific services
|
||||
// (e.g. collate, cases) should be initialized with the tag that was
|
||||
// obtained at the start of an interaction with the user.
|
||||
//
|
||||
// Note that Tag that is returned by Match and MatchString may differ from any
|
||||
// of the supported languages, as it may contain carried over settings from
|
||||
// the user tags.
|
||||
// This may be inconvenient when your application has some additional
|
||||
// locale-specific data for your supported languages.
|
||||
// Match and MatchString both return the index of the matched supported tag
|
||||
// to simplify associating such data with the matched tag.
|
||||
//
|
||||
//
|
||||
// Canonicalization
|
||||
//
|
||||
// If one uses the Matcher to compare languages one does not need to
|
||||
// worry about canonicalization.
|
||||
//
|
||||
// The meaning of a Tag varies per application. The language package
|
||||
// therefore delays canonicalization and preserves information as much
|
||||
// as possible. The Matcher, however, will always take into account that
|
||||
// two different tags may represent the same language.
|
||||
//
|
||||
// By default, only legacy and deprecated tags are converted into their
|
||||
// canonical equivalent. All other information is preserved. This approach makes
|
||||
// the confidence scores more accurate and allows matchers to distinguish
|
||||
// between variants that are otherwise lost.
|
||||
//
|
||||
// As a consequence, two tags that should be treated as identical according to
|
||||
// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The
|
||||
// Matcher handles such distinctions, though, and is aware of the
|
||||
// equivalence relations. The CanonType type can be used to alter the
|
||||
// canonicalization form.
|
||||
//
|
||||
// References
|
||||
//
|
||||
// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47
|
||||
//
|
||||
package language // import "golang.org/x/text/language"
|
||||
|
||||
// TODO: explanation on how to match languages for your own locale-specific
|
||||
// service.
|
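The doc.go comment above walks through NewMatcher and MatchStrings. The stand-alone sketch below shows that flow end to end; the supported-tag list and the Accept-Language value are illustrative choices, not taken from this diff.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

// Supported languages; the first entry is the fallback.
var matcher = language.NewMatcher([]language.Tag{
	language.English,
	language.MustParse("en-AU"),
	language.Danish,
	language.Chinese,
})

func main() {
	// A raw Accept-Language header value (illustrative).
	accept := "da, en-GB;q=0.8, en;q=0.7"

	// MatchStrings parses the user-preferred list and picks the best
	// supported tag; index reports which supported tag was chosen.
	tag, index := language.MatchStrings(matcher, accept)
	fmt.Println(tag, index) // da 2

	// Match works on already-parsed Tags and also reports a Confidence.
	tag, index, conf := matcher.Match(language.MustParse("en-GB"))
	fmt.Println(tag, index, conf)
}

Returning the index of the matched supported tag is what makes it straightforward to associate application data with each supported language, as the deleted comment notes.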
1712 vendor/golang.org/x/text/language/gen.go (generated, vendored)
File diff suppressed because it is too large
20 vendor/golang.org/x/text/language/gen_common.go (generated, vendored)
@ -1,20 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// This file contains code common to the maketables.go and the package code.

// langAliasType is the type of an alias in langAliasMap.
type langAliasType int8

const (
	langDeprecated langAliasType = iota
	langMacro
	langLegacy

	langAliasTypeUnknown langAliasType = -1
)
162 vendor/golang.org/x/text/language/gen_index.go (generated, vendored)
@ -1,162 +0,0 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
// This file generates derivative tables based on the language package itself.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
"golang.org/x/text/language"
|
||||
"golang.org/x/text/unicode/cldr"
|
||||
)
|
||||
|
||||
var (
|
||||
test = flag.Bool("test", false,
|
||||
"test existing tables; can be used to compare web data with package data.")
|
||||
|
||||
draft = flag.String("draft",
|
||||
"contributed",
|
||||
`Minimal draft requirements (approved, contributed, provisional, unconfirmed).`)
|
||||
)
|
||||
|
||||
func main() {
|
||||
gen.Init()
|
||||
|
||||
// Read the CLDR zip file.
|
||||
r := gen.OpenCLDRCoreZip()
|
||||
defer r.Close()
|
||||
|
||||
d := &cldr.Decoder{}
|
||||
data, err := d.DecodeZip(r)
|
||||
if err != nil {
|
||||
log.Fatalf("DecodeZip: %v", err)
|
||||
}
|
||||
|
||||
w := gen.NewCodeWriter()
|
||||
defer func() {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
if _, err = w.WriteGo(buf, "language", ""); err != nil {
|
||||
log.Fatalf("Error formatting file index.go: %v", err)
|
||||
}
|
||||
|
||||
// Since we're generating a table for our own package we need to rewrite
|
||||
// doing the equivalent of go fmt -r 'language.b -> b'. Using
|
||||
// bytes.Replace will do.
|
||||
out := bytes.Replace(buf.Bytes(), []byte("language."), nil, -1)
|
||||
if err := ioutil.WriteFile("index.go", out, 0600); err != nil {
|
||||
log.Fatalf("Could not create file index.go: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
m := map[language.Tag]bool{}
|
||||
for _, lang := range data.Locales() {
|
||||
// We include all locales unconditionally to be consistent with en_US.
|
||||
// We want en_US, even though it has no data associated with it.
|
||||
|
||||
// TODO: put any of the languages for which no data exists at the end
|
||||
// of the index. This allows all components based on ICU to use that
|
||||
// as the cutoff point.
|
||||
// if x := data.RawLDML(lang); false ||
|
||||
// x.LocaleDisplayNames != nil ||
|
||||
// x.Characters != nil ||
|
||||
// x.Delimiters != nil ||
|
||||
// x.Measurement != nil ||
|
||||
// x.Dates != nil ||
|
||||
// x.Numbers != nil ||
|
||||
// x.Units != nil ||
|
||||
// x.ListPatterns != nil ||
|
||||
// x.Collations != nil ||
|
||||
// x.Segmentations != nil ||
|
||||
// x.Rbnf != nil ||
|
||||
// x.Annotations != nil ||
|
||||
// x.Metadata != nil {
|
||||
|
||||
// TODO: support POSIX natively, albeit non-standard.
|
||||
tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1))
|
||||
m[tag] = true
|
||||
// }
|
||||
}
|
||||
// Include locales for plural rules, which uses a different structure.
|
||||
for _, plurals := range data.Supplemental().Plurals {
|
||||
for _, rules := range plurals.PluralRules {
|
||||
for _, lang := range strings.Split(rules.Locales, " ") {
|
||||
m[language.Make(lang)] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var core, special []language.Tag
|
||||
|
||||
for t := range m {
|
||||
if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" {
|
||||
log.Fatalf("Unexpected extension %v in %v", x, t)
|
||||
}
|
||||
if len(t.Variants()) == 0 && len(t.Extensions()) == 0 {
|
||||
core = append(core, t)
|
||||
} else {
|
||||
special = append(special, t)
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteComment(`
|
||||
NumCompactTags is the number of common tags. The maximum tag is
|
||||
NumCompactTags-1.`)
|
||||
w.WriteConst("NumCompactTags", len(core)+len(special))
|
||||
|
||||
sort.Sort(byAlpha(special))
|
||||
w.WriteVar("specialTags", special)
|
||||
|
||||
// TODO: order by frequency?
|
||||
sort.Sort(byAlpha(core))
|
||||
|
||||
// Size computations are just an estimate.
|
||||
w.Size += int(reflect.TypeOf(map[uint32]uint16{}).Size())
|
||||
w.Size += len(core) * 6 // size of uint32 and uint16
|
||||
|
||||
fmt.Fprintln(w)
|
||||
fmt.Fprintln(w, "var coreTags = map[uint32]uint16{")
|
||||
fmt.Fprintln(w, "0x0: 0, // und")
|
||||
i := len(special) + 1 // Und and special tags already written.
|
||||
for _, t := range core {
|
||||
if t == language.Und {
|
||||
continue
|
||||
}
|
||||
fmt.Fprint(w.Hash, t, i)
|
||||
b, s, r := t.Raw()
|
||||
fmt.Fprintf(w, "0x%s%s%s: %d, // %s\n",
|
||||
getIndex(b, 3), // 3 is enough as it is guaranteed to be a compact number
|
||||
getIndex(s, 2),
|
||||
getIndex(r, 3),
|
||||
i, t)
|
||||
i++
|
||||
}
|
||||
fmt.Fprintln(w, "}")
|
||||
}
|
||||
|
||||
// getIndex prints the subtag type and extracts its index of size nibble.
|
||||
// If the index is less than n nibbles, the result is prefixed with 0s.
|
||||
func getIndex(x interface{}, n int) string {
|
||||
s := fmt.Sprintf("%#v", x) // s is of form Type{typeID: 0x00}
|
||||
s = s[strings.Index(s, "0x")+2 : len(s)-1]
|
||||
return strings.Repeat("0", n-len(s)) + s
|
||||
}
|
||||
|
||||
type byAlpha []language.Tag
|
||||
|
||||
func (a byAlpha) Len() int { return len(a) }
|
||||
func (a byAlpha) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byAlpha) Less(i, j int) bool { return a[i].String() < a[j].String() }
|
38 vendor/golang.org/x/text/language/go1_1.go (generated, vendored)
@ -1,38 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.2

package language

import "sort"

func sortStable(s sort.Interface) {
	ss := stableSort{
		s:   s,
		pos: make([]int, s.Len()),
	}
	for i := range ss.pos {
		ss.pos[i] = i
	}
	sort.Sort(&ss)
}

type stableSort struct {
	s   sort.Interface
	pos []int
}

func (s *stableSort) Len() int {
	return len(s.pos)
}

func (s *stableSort) Less(i, j int) bool {
	return s.s.Less(i, j) || !s.s.Less(j, i) && s.pos[i] < s.pos[j]
}

func (s *stableSort) Swap(i, j int) {
	s.s.Swap(i, j)
	s.pos[i], s.pos[j] = s.pos[j], s.pos[i]
}
11 vendor/golang.org/x/text/language/go1_2.go (generated, vendored)
@ -1,11 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.2

package language

import "sort"

var sortStable = sort.Stable
783 vendor/golang.org/x/text/language/index.go (generated, vendored)
@ -1,783 +0,0 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
package language
|
||||
|
||||
// NumCompactTags is the number of common tags. The maximum tag is
|
||||
// NumCompactTags-1.
|
||||
const NumCompactTags = 768
|
||||
|
||||
var specialTags = []Tag{ // 2 elements
|
||||
0: {lang: 0xd7, region: 0x6e, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"},
|
||||
1: {lang: 0x139, region: 0x135, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"},
|
||||
} // Size: 72 bytes
|
||||
|
||||
var coreTags = map[uint32]uint16{
|
||||
0x0: 0, // und
|
||||
0x01600000: 3, // af
|
||||
0x016000d2: 4, // af-NA
|
||||
0x01600161: 5, // af-ZA
|
||||
0x01c00000: 6, // agq
|
||||
0x01c00052: 7, // agq-CM
|
||||
0x02100000: 8, // ak
|
||||
0x02100080: 9, // ak-GH
|
||||
0x02700000: 10, // am
|
||||
0x0270006f: 11, // am-ET
|
||||
0x03a00000: 12, // ar
|
||||
0x03a00001: 13, // ar-001
|
||||
0x03a00023: 14, // ar-AE
|
||||
0x03a00039: 15, // ar-BH
|
||||
0x03a00062: 16, // ar-DJ
|
||||
0x03a00067: 17, // ar-DZ
|
||||
0x03a0006b: 18, // ar-EG
|
||||
0x03a0006c: 19, // ar-EH
|
||||
0x03a0006d: 20, // ar-ER
|
||||
0x03a00097: 21, // ar-IL
|
||||
0x03a0009b: 22, // ar-IQ
|
||||
0x03a000a1: 23, // ar-JO
|
||||
0x03a000a8: 24, // ar-KM
|
||||
0x03a000ac: 25, // ar-KW
|
||||
0x03a000b0: 26, // ar-LB
|
||||
0x03a000b9: 27, // ar-LY
|
||||
0x03a000ba: 28, // ar-MA
|
||||
0x03a000c9: 29, // ar-MR
|
||||
0x03a000e1: 30, // ar-OM
|
||||
0x03a000ed: 31, // ar-PS
|
||||
0x03a000f3: 32, // ar-QA
|
||||
0x03a00108: 33, // ar-SA
|
||||
0x03a0010b: 34, // ar-SD
|
||||
0x03a00115: 35, // ar-SO
|
||||
0x03a00117: 36, // ar-SS
|
||||
0x03a0011c: 37, // ar-SY
|
||||
0x03a00120: 38, // ar-TD
|
||||
0x03a00128: 39, // ar-TN
|
||||
0x03a0015e: 40, // ar-YE
|
||||
0x04000000: 41, // ars
|
||||
0x04300000: 42, // as
|
||||
0x04300099: 43, // as-IN
|
||||
0x04400000: 44, // asa
|
||||
0x0440012f: 45, // asa-TZ
|
||||
0x04800000: 46, // ast
|
||||
0x0480006e: 47, // ast-ES
|
||||
0x05800000: 48, // az
|
||||
0x0581f000: 49, // az-Cyrl
|
||||
0x0581f032: 50, // az-Cyrl-AZ
|
||||
0x05857000: 51, // az-Latn
|
||||
0x05857032: 52, // az-Latn-AZ
|
||||
0x05e00000: 53, // bas
|
||||
0x05e00052: 54, // bas-CM
|
||||
0x07100000: 55, // be
|
||||
0x07100047: 56, // be-BY
|
||||
0x07500000: 57, // bem
|
||||
0x07500162: 58, // bem-ZM
|
||||
0x07900000: 59, // bez
|
||||
0x0790012f: 60, // bez-TZ
|
||||
0x07e00000: 61, // bg
|
||||
0x07e00038: 62, // bg-BG
|
||||
0x08200000: 63, // bh
|
||||
0x0a000000: 64, // bm
|
||||
0x0a0000c3: 65, // bm-ML
|
||||
0x0a500000: 66, // bn
|
||||
0x0a500035: 67, // bn-BD
|
||||
0x0a500099: 68, // bn-IN
|
||||
0x0a900000: 69, // bo
|
||||
0x0a900053: 70, // bo-CN
|
||||
0x0a900099: 71, // bo-IN
|
||||
0x0b200000: 72, // br
|
||||
0x0b200078: 73, // br-FR
|
||||
0x0b500000: 74, // brx
|
||||
0x0b500099: 75, // brx-IN
|
||||
0x0b700000: 76, // bs
|
||||
0x0b71f000: 77, // bs-Cyrl
|
||||
0x0b71f033: 78, // bs-Cyrl-BA
|
||||
0x0b757000: 79, // bs-Latn
|
||||
0x0b757033: 80, // bs-Latn-BA
|
||||
0x0d700000: 81, // ca
|
||||
0x0d700022: 82, // ca-AD
|
||||
0x0d70006e: 83, // ca-ES
|
||||
0x0d700078: 84, // ca-FR
|
||||
0x0d70009e: 85, // ca-IT
|
||||
0x0db00000: 86, // ccp
|
||||
0x0db00035: 87, // ccp-BD
|
||||
0x0db00099: 88, // ccp-IN
|
||||
0x0dc00000: 89, // ce
|
||||
0x0dc00106: 90, // ce-RU
|
||||
0x0df00000: 91, // cgg
|
||||
0x0df00131: 92, // cgg-UG
|
||||
0x0e500000: 93, // chr
|
||||
0x0e500135: 94, // chr-US
|
||||
0x0e900000: 95, // ckb
|
||||
0x0e90009b: 96, // ckb-IQ
|
||||
0x0e90009c: 97, // ckb-IR
|
||||
0x0fa00000: 98, // cs
|
||||
0x0fa0005e: 99, // cs-CZ
|
||||
0x0fe00000: 100, // cu
|
||||
0x0fe00106: 101, // cu-RU
|
||||
0x10000000: 102, // cy
|
||||
0x1000007b: 103, // cy-GB
|
||||
0x10100000: 104, // da
|
||||
0x10100063: 105, // da-DK
|
||||
0x10100082: 106, // da-GL
|
||||
0x10800000: 107, // dav
|
||||
0x108000a4: 108, // dav-KE
|
||||
0x10d00000: 109, // de
|
||||
0x10d0002e: 110, // de-AT
|
||||
0x10d00036: 111, // de-BE
|
||||
0x10d0004e: 112, // de-CH
|
||||
0x10d00060: 113, // de-DE
|
||||
0x10d0009e: 114, // de-IT
|
||||
0x10d000b2: 115, // de-LI
|
||||
0x10d000b7: 116, // de-LU
|
||||
0x11700000: 117, // dje
|
||||
0x117000d4: 118, // dje-NE
|
||||
0x11f00000: 119, // dsb
|
||||
0x11f00060: 120, // dsb-DE
|
||||
0x12400000: 121, // dua
|
||||
0x12400052: 122, // dua-CM
|
||||
0x12800000: 123, // dv
|
||||
0x12b00000: 124, // dyo
|
||||
0x12b00114: 125, // dyo-SN
|
||||
0x12d00000: 126, // dz
|
||||
0x12d00043: 127, // dz-BT
|
||||
0x12f00000: 128, // ebu
|
||||
0x12f000a4: 129, // ebu-KE
|
||||
0x13000000: 130, // ee
|
||||
0x13000080: 131, // ee-GH
|
||||
0x13000122: 132, // ee-TG
|
||||
0x13600000: 133, // el
|
||||
0x1360005d: 134, // el-CY
|
||||
0x13600087: 135, // el-GR
|
||||
0x13900000: 136, // en
|
||||
0x13900001: 137, // en-001
|
||||
0x1390001a: 138, // en-150
|
||||
0x13900025: 139, // en-AG
|
||||
0x13900026: 140, // en-AI
|
||||
0x1390002d: 141, // en-AS
|
||||
0x1390002e: 142, // en-AT
|
||||
0x1390002f: 143, // en-AU
|
||||
0x13900034: 144, // en-BB
|
||||
0x13900036: 145, // en-BE
|
||||
0x1390003a: 146, // en-BI
|
||||
0x1390003d: 147, // en-BM
|
||||
0x13900042: 148, // en-BS
|
||||
0x13900046: 149, // en-BW
|
||||
0x13900048: 150, // en-BZ
|
||||
0x13900049: 151, // en-CA
|
||||
0x1390004a: 152, // en-CC
|
||||
0x1390004e: 153, // en-CH
|
||||
0x13900050: 154, // en-CK
|
||||
0x13900052: 155, // en-CM
|
||||
0x1390005c: 156, // en-CX
|
||||
0x1390005d: 157, // en-CY
|
||||
0x13900060: 158, // en-DE
|
||||
0x13900061: 159, // en-DG
|
||||
0x13900063: 160, // en-DK
|
||||
0x13900064: 161, // en-DM
|
||||
0x1390006d: 162, // en-ER
|
||||
0x13900072: 163, // en-FI
|
||||
0x13900073: 164, // en-FJ
|
||||
0x13900074: 165, // en-FK
|
||||
0x13900075: 166, // en-FM
|
||||
0x1390007b: 167, // en-GB
|
||||
0x1390007c: 168, // en-GD
|
||||
0x1390007f: 169, // en-GG
|
||||
0x13900080: 170, // en-GH
|
||||
0x13900081: 171, // en-GI
|
||||
0x13900083: 172, // en-GM
|
||||
0x1390008a: 173, // en-GU
|
||||
0x1390008c: 174, // en-GY
|
||||
0x1390008d: 175, // en-HK
|
||||
0x13900096: 176, // en-IE
|
||||
0x13900097: 177, // en-IL
|
||||
0x13900098: 178, // en-IM
|
||||
0x13900099: 179, // en-IN
|
||||
0x1390009a: 180, // en-IO
|
||||
0x1390009f: 181, // en-JE
|
||||
0x139000a0: 182, // en-JM
|
||||
0x139000a4: 183, // en-KE
|
||||
0x139000a7: 184, // en-KI
|
||||
0x139000a9: 185, // en-KN
|
||||
0x139000ad: 186, // en-KY
|
||||
0x139000b1: 187, // en-LC
|
||||
0x139000b4: 188, // en-LR
|
||||
0x139000b5: 189, // en-LS
|
||||
0x139000bf: 190, // en-MG
|
||||
0x139000c0: 191, // en-MH
|
||||
0x139000c6: 192, // en-MO
|
||||
0x139000c7: 193, // en-MP
|
||||
0x139000ca: 194, // en-MS
|
||||
0x139000cb: 195, // en-MT
|
||||
0x139000cc: 196, // en-MU
|
||||
0x139000ce: 197, // en-MW
|
||||
0x139000d0: 198, // en-MY
|
||||
0x139000d2: 199, // en-NA
|
||||
0x139000d5: 200, // en-NF
|
||||
0x139000d6: 201, // en-NG
|
||||
0x139000d9: 202, // en-NL
|
||||
0x139000dd: 203, // en-NR
|
||||
0x139000df: 204, // en-NU
|
||||
0x139000e0: 205, // en-NZ
|
||||
0x139000e6: 206, // en-PG
|
||||
0x139000e7: 207, // en-PH
|
||||
0x139000e8: 208, // en-PK
|
||||
0x139000eb: 209, // en-PN
|
||||
0x139000ec: 210, // en-PR
|
||||
0x139000f0: 211, // en-PW
|
||||
0x13900107: 212, // en-RW
|
||||
0x13900109: 213, // en-SB
|
||||
0x1390010a: 214, // en-SC
|
||||
0x1390010b: 215, // en-SD
|
||||
0x1390010c: 216, // en-SE
|
||||
0x1390010d: 217, // en-SG
|
||||
0x1390010e: 218, // en-SH
|
||||
0x1390010f: 219, // en-SI
|
||||
0x13900112: 220, // en-SL
|
||||
0x13900117: 221, // en-SS
|
||||
0x1390011b: 222, // en-SX
|
||||
0x1390011d: 223, // en-SZ
|
||||
0x1390011f: 224, // en-TC
|
||||
0x13900125: 225, // en-TK
|
||||
0x13900129: 226, // en-TO
|
||||
0x1390012c: 227, // en-TT
|
||||
0x1390012d: 228, // en-TV
|
||||
0x1390012f: 229, // en-TZ
|
||||
0x13900131: 230, // en-UG
|
||||
0x13900133: 231, // en-UM
|
||||
0x13900135: 232, // en-US
|
||||
0x13900139: 233, // en-VC
|
||||
0x1390013c: 234, // en-VG
|
||||
0x1390013d: 235, // en-VI
|
||||
0x1390013f: 236, // en-VU
|
||||
0x13900142: 237, // en-WS
|
||||
0x13900161: 238, // en-ZA
|
||||
0x13900162: 239, // en-ZM
|
||||
0x13900164: 240, // en-ZW
|
||||
0x13c00000: 241, // eo
|
||||
0x13c00001: 242, // eo-001
|
||||
0x13e00000: 243, // es
|
||||
0x13e0001f: 244, // es-419
|
||||
0x13e0002c: 245, // es-AR
|
||||
0x13e0003f: 246, // es-BO
|
||||
0x13e00041: 247, // es-BR
|
||||
0x13e00048: 248, // es-BZ
|
||||
0x13e00051: 249, // es-CL
|
||||
0x13e00054: 250, // es-CO
|
||||
0x13e00056: 251, // es-CR
|
||||
0x13e00059: 252, // es-CU
|
||||
0x13e00065: 253, // es-DO
|
||||
0x13e00068: 254, // es-EA
|
||||
0x13e00069: 255, // es-EC
|
||||
0x13e0006e: 256, // es-ES
|
||||
0x13e00086: 257, // es-GQ
|
||||
0x13e00089: 258, // es-GT
|
||||
0x13e0008f: 259, // es-HN
|
||||
0x13e00094: 260, // es-IC
|
||||
0x13e000cf: 261, // es-MX
|
||||
0x13e000d8: 262, // es-NI
|
||||
0x13e000e2: 263, // es-PA
|
||||
0x13e000e4: 264, // es-PE
|
||||
0x13e000e7: 265, // es-PH
|
||||
0x13e000ec: 266, // es-PR
|
||||
0x13e000f1: 267, // es-PY
|
||||
0x13e0011a: 268, // es-SV
|
||||
0x13e00135: 269, // es-US
|
||||
0x13e00136: 270, // es-UY
|
||||
0x13e0013b: 271, // es-VE
|
||||
0x14000000: 272, // et
|
||||
0x1400006a: 273, // et-EE
|
||||
0x14500000: 274, // eu
|
||||
0x1450006e: 275, // eu-ES
|
||||
0x14600000: 276, // ewo
|
||||
0x14600052: 277, // ewo-CM
|
||||
0x14800000: 278, // fa
|
||||
0x14800024: 279, // fa-AF
|
||||
0x1480009c: 280, // fa-IR
|
||||
0x14e00000: 281, // ff
|
||||
0x14e00052: 282, // ff-CM
|
||||
0x14e00084: 283, // ff-GN
|
||||
0x14e000c9: 284, // ff-MR
|
||||
0x14e00114: 285, // ff-SN
|
||||
0x15100000: 286, // fi
|
||||
0x15100072: 287, // fi-FI
|
||||
0x15300000: 288, // fil
|
||||
0x153000e7: 289, // fil-PH
|
||||
0x15800000: 290, // fo
|
||||
0x15800063: 291, // fo-DK
|
||||
0x15800076: 292, // fo-FO
|
||||
0x15e00000: 293, // fr
|
||||
0x15e00036: 294, // fr-BE
|
||||
0x15e00037: 295, // fr-BF
|
||||
0x15e0003a: 296, // fr-BI
|
||||
0x15e0003b: 297, // fr-BJ
|
||||
0x15e0003c: 298, // fr-BL
|
||||
0x15e00049: 299, // fr-CA
|
||||
0x15e0004b: 300, // fr-CD
|
||||
0x15e0004c: 301, // fr-CF
|
||||
0x15e0004d: 302, // fr-CG
|
||||
0x15e0004e: 303, // fr-CH
|
||||
0x15e0004f: 304, // fr-CI
|
||||
0x15e00052: 305, // fr-CM
|
||||
0x15e00062: 306, // fr-DJ
|
||||
0x15e00067: 307, // fr-DZ
|
||||
0x15e00078: 308, // fr-FR
|
||||
0x15e0007a: 309, // fr-GA
|
||||
0x15e0007e: 310, // fr-GF
|
||||
0x15e00084: 311, // fr-GN
|
||||
0x15e00085: 312, // fr-GP
|
||||
0x15e00086: 313, // fr-GQ
|
||||
0x15e00091: 314, // fr-HT
|
||||
0x15e000a8: 315, // fr-KM
|
||||
0x15e000b7: 316, // fr-LU
|
||||
0x15e000ba: 317, // fr-MA
|
||||
0x15e000bb: 318, // fr-MC
|
||||
0x15e000be: 319, // fr-MF
|
||||
0x15e000bf: 320, // fr-MG
|
||||
0x15e000c3: 321, // fr-ML
|
||||
0x15e000c8: 322, // fr-MQ
|
||||
0x15e000c9: 323, // fr-MR
|
||||
0x15e000cc: 324, // fr-MU
|
||||
0x15e000d3: 325, // fr-NC
|
||||
0x15e000d4: 326, // fr-NE
|
||||
0x15e000e5: 327, // fr-PF
|
||||
0x15e000ea: 328, // fr-PM
|
||||
0x15e00102: 329, // fr-RE
|
||||
0x15e00107: 330, // fr-RW
|
||||
0x15e0010a: 331, // fr-SC
|
||||
0x15e00114: 332, // fr-SN
|
||||
0x15e0011c: 333, // fr-SY
|
||||
0x15e00120: 334, // fr-TD
|
||||
0x15e00122: 335, // fr-TG
|
||||
0x15e00128: 336, // fr-TN
|
||||
0x15e0013f: 337, // fr-VU
|
||||
0x15e00140: 338, // fr-WF
|
||||
0x15e0015f: 339, // fr-YT
|
||||
0x16900000: 340, // fur
|
||||
0x1690009e: 341, // fur-IT
|
||||
0x16d00000: 342, // fy
|
||||
0x16d000d9: 343, // fy-NL
|
||||
0x16e00000: 344, // ga
|
||||
0x16e00096: 345, // ga-IE
|
||||
0x17e00000: 346, // gd
|
||||
0x17e0007b: 347, // gd-GB
|
||||
0x19000000: 348, // gl
|
||||
0x1900006e: 349, // gl-ES
|
||||
0x1a300000: 350, // gsw
|
||||
0x1a30004e: 351, // gsw-CH
|
||||
0x1a300078: 352, // gsw-FR
|
||||
0x1a3000b2: 353, // gsw-LI
|
||||
0x1a400000: 354, // gu
|
||||
0x1a400099: 355, // gu-IN
|
||||
0x1a900000: 356, // guw
|
||||
0x1ab00000: 357, // guz
|
||||
0x1ab000a4: 358, // guz-KE
|
||||
0x1ac00000: 359, // gv
|
||||
0x1ac00098: 360, // gv-IM
|
||||
0x1b400000: 361, // ha
|
||||
0x1b400080: 362, // ha-GH
|
||||
0x1b4000d4: 363, // ha-NE
|
||||
0x1b4000d6: 364, // ha-NG
|
||||
0x1b800000: 365, // haw
|
||||
0x1b800135: 366, // haw-US
|
||||
0x1bc00000: 367, // he
|
||||
0x1bc00097: 368, // he-IL
|
||||
0x1be00000: 369, // hi
|
||||
0x1be00099: 370, // hi-IN
|
||||
0x1d100000: 371, // hr
|
||||
0x1d100033: 372, // hr-BA
|
||||
0x1d100090: 373, // hr-HR
|
||||
0x1d200000: 374, // hsb
|
||||
0x1d200060: 375, // hsb-DE
|
||||
0x1d500000: 376, // hu
|
||||
0x1d500092: 377, // hu-HU
|
||||
0x1d700000: 378, // hy
|
||||
0x1d700028: 379, // hy-AM
|
||||
0x1e100000: 380, // id
|
||||
0x1e100095: 381, // id-ID
|
||||
0x1e700000: 382, // ig
|
||||
0x1e7000d6: 383, // ig-NG
|
||||
0x1ea00000: 384, // ii
|
||||
0x1ea00053: 385, // ii-CN
|
||||
0x1f500000: 386, // io
|
||||
0x1f800000: 387, // is
|
||||
0x1f80009d: 388, // is-IS
|
||||
0x1f900000: 389, // it
|
||||
0x1f90004e: 390, // it-CH
|
||||
0x1f90009e: 391, // it-IT
|
||||
0x1f900113: 392, // it-SM
|
||||
0x1f900138: 393, // it-VA
|
||||
0x1fa00000: 394, // iu
|
||||
0x20000000: 395, // ja
|
||||
0x200000a2: 396, // ja-JP
|
||||
0x20300000: 397, // jbo
|
||||
0x20700000: 398, // jgo
|
||||
0x20700052: 399, // jgo-CM
|
||||
0x20a00000: 400, // jmc
|
||||
0x20a0012f: 401, // jmc-TZ
|
||||
0x20e00000: 402, // jv
|
||||
0x21000000: 403, // ka
|
||||
0x2100007d: 404, // ka-GE
|
||||
0x21200000: 405, // kab
|
||||
0x21200067: 406, // kab-DZ
|
||||
0x21600000: 407, // kaj
|
||||
0x21700000: 408, // kam
|
||||
0x217000a4: 409, // kam-KE
|
||||
0x21f00000: 410, // kcg
|
||||
0x22300000: 411, // kde
|
||||
0x2230012f: 412, // kde-TZ
|
||||
0x22700000: 413, // kea
|
||||
0x2270005a: 414, // kea-CV
|
||||
0x23400000: 415, // khq
|
||||
0x234000c3: 416, // khq-ML
|
||||
0x23900000: 417, // ki
|
||||
0x239000a4: 418, // ki-KE
|
||||
0x24200000: 419, // kk
|
||||
0x242000ae: 420, // kk-KZ
|
||||
0x24400000: 421, // kkj
|
||||
0x24400052: 422, // kkj-CM
|
||||
0x24500000: 423, // kl
|
||||
0x24500082: 424, // kl-GL
|
||||
0x24600000: 425, // kln
|
||||
0x246000a4: 426, // kln-KE
|
||||
0x24a00000: 427, // km
|
||||
0x24a000a6: 428, // km-KH
|
||||
0x25100000: 429, // kn
|
||||
0x25100099: 430, // kn-IN
|
||||
0x25400000: 431, // ko
|
||||
0x254000aa: 432, // ko-KP
|
||||
0x254000ab: 433, // ko-KR
|
||||
0x25600000: 434, // kok
|
||||
0x25600099: 435, // kok-IN
|
||||
0x26a00000: 436, // ks
|
||||
0x26a00099: 437, // ks-IN
|
||||
0x26b00000: 438, // ksb
|
||||
0x26b0012f: 439, // ksb-TZ
|
||||
0x26d00000: 440, // ksf
|
||||
0x26d00052: 441, // ksf-CM
|
||||
0x26e00000: 442, // ksh
|
||||
0x26e00060: 443, // ksh-DE
|
||||
0x27400000: 444, // ku
|
||||
0x28100000: 445, // kw
|
||||
0x2810007b: 446, // kw-GB
|
||||
0x28a00000: 447, // ky
|
||||
0x28a000a5: 448, // ky-KG
|
||||
0x29100000: 449, // lag
|
||||
0x2910012f: 450, // lag-TZ
|
||||
0x29500000: 451, // lb
|
||||
0x295000b7: 452, // lb-LU
|
||||
0x2a300000: 453, // lg
|
||||
0x2a300131: 454, // lg-UG
|
||||
0x2af00000: 455, // lkt
|
||||
0x2af00135: 456, // lkt-US
|
||||
0x2b500000: 457, // ln
|
||||
0x2b50002a: 458, // ln-AO
|
||||
0x2b50004b: 459, // ln-CD
|
||||
0x2b50004c: 460, // ln-CF
|
||||
0x2b50004d: 461, // ln-CG
|
||||
0x2b800000: 462, // lo
|
||||
0x2b8000af: 463, // lo-LA
|
||||
0x2bf00000: 464, // lrc
|
||||
0x2bf0009b: 465, // lrc-IQ
|
||||
0x2bf0009c: 466, // lrc-IR
|
||||
0x2c000000: 467, // lt
|
||||
0x2c0000b6: 468, // lt-LT
|
||||
0x2c200000: 469, // lu
|
||||
0x2c20004b: 470, // lu-CD
|
||||
0x2c400000: 471, // luo
|
||||
0x2c4000a4: 472, // luo-KE
|
||||
0x2c500000: 473, // luy
|
||||
0x2c5000a4: 474, // luy-KE
|
||||
0x2c700000: 475, // lv
|
||||
0x2c7000b8: 476, // lv-LV
|
||||
0x2d100000: 477, // mas
|
||||
0x2d1000a4: 478, // mas-KE
|
||||
0x2d10012f: 479, // mas-TZ
|
||||
0x2e900000: 480, // mer
|
||||
0x2e9000a4: 481, // mer-KE
|
||||
0x2ed00000: 482, // mfe
|
||||
0x2ed000cc: 483, // mfe-MU
|
||||
0x2f100000: 484, // mg
|
||||
0x2f1000bf: 485, // mg-MG
|
||||
0x2f200000: 486, // mgh
|
||||
0x2f2000d1: 487, // mgh-MZ
|
||||
0x2f400000: 488, // mgo
|
||||
0x2f400052: 489, // mgo-CM
|
||||
0x2ff00000: 490, // mk
|
||||
0x2ff000c2: 491, // mk-MK
|
||||
0x30400000: 492, // ml
|
||||
0x30400099: 493, // ml-IN
|
||||
0x30b00000: 494, // mn
|
||||
0x30b000c5: 495, // mn-MN
|
||||
0x31b00000: 496, // mr
|
||||
0x31b00099: 497, // mr-IN
|
||||
0x31f00000: 498, // ms
|
||||
0x31f0003e: 499, // ms-BN
|
||||
0x31f000d0: 500, // ms-MY
|
||||
0x31f0010d: 501, // ms-SG
|
||||
0x32000000: 502, // mt
|
||||
0x320000cb: 503, // mt-MT
|
||||
0x32500000: 504, // mua
|
||||
0x32500052: 505, // mua-CM
|
||||
0x33100000: 506, // my
|
||||
0x331000c4: 507, // my-MM
|
||||
0x33a00000: 508, // mzn
|
||||
0x33a0009c: 509, // mzn-IR
|
||||
0x34100000: 510, // nah
|
||||
0x34500000: 511, // naq
|
||||
0x345000d2: 512, // naq-NA
|
||||
0x34700000: 513, // nb
|
||||
0x347000da: 514, // nb-NO
|
||||
0x34700110: 515, // nb-SJ
|
||||
0x34e00000: 516, // nd
|
||||
0x34e00164: 517, // nd-ZW
|
||||
0x35000000: 518, // nds
|
||||
0x35000060: 519, // nds-DE
|
||||
0x350000d9: 520, // nds-NL
|
||||
0x35100000: 521, // ne
|
||||
0x35100099: 522, // ne-IN
|
||||
0x351000db: 523, // ne-NP
|
||||
0x36700000: 524, // nl
|
||||
0x36700030: 525, // nl-AW
|
||||
0x36700036: 526, // nl-BE
|
||||
0x36700040: 527, // nl-BQ
|
||||
0x3670005b: 528, // nl-CW
|
||||
0x367000d9: 529, // nl-NL
|
||||
0x36700116: 530, // nl-SR
|
||||
0x3670011b: 531, // nl-SX
|
||||
0x36800000: 532, // nmg
|
||||
0x36800052: 533, // nmg-CM
|
||||
0x36a00000: 534, // nn
|
||||
0x36a000da: 535, // nn-NO
|
||||
0x36c00000: 536, // nnh
|
||||
0x36c00052: 537, // nnh-CM
|
||||
0x36f00000: 538, // no
|
||||
0x37500000: 539, // nqo
|
||||
0x37600000: 540, // nr
|
||||
0x37a00000: 541, // nso
|
||||
0x38000000: 542, // nus
|
||||
0x38000117: 543, // nus-SS
|
||||
0x38700000: 544, // ny
|
||||
0x38900000: 545, // nyn
|
||||
0x38900131: 546, // nyn-UG
|
||||
0x39000000: 547, // om
|
||||
0x3900006f: 548, // om-ET
|
||||
0x390000a4: 549, // om-KE
|
||||
0x39500000: 550, // or
|
||||
0x39500099: 551, // or-IN
|
||||
0x39800000: 552, // os
|
||||
0x3980007d: 553, // os-GE
|
||||
0x39800106: 554, // os-RU
|
||||
0x39d00000: 555, // pa
|
||||
0x39d05000: 556, // pa-Arab
|
||||
0x39d050e8: 557, // pa-Arab-PK
|
||||
0x39d33000: 558, // pa-Guru
|
||||
0x39d33099: 559, // pa-Guru-IN
|
||||
0x3a100000: 560, // pap
|
||||
0x3b300000: 561, // pl
|
||||
0x3b3000e9: 562, // pl-PL
|
||||
0x3bd00000: 563, // prg
|
||||
0x3bd00001: 564, // prg-001
|
||||
0x3be00000: 565, // ps
|
||||
0x3be00024: 566, // ps-AF
|
||||
0x3c000000: 567, // pt
|
||||
0x3c00002a: 568, // pt-AO
|
||||
0x3c000041: 569, // pt-BR
|
||||
0x3c00004e: 570, // pt-CH
|
||||
0x3c00005a: 571, // pt-CV
|
||||
0x3c000086: 572, // pt-GQ
|
||||
0x3c00008b: 573, // pt-GW
|
||||
0x3c0000b7: 574, // pt-LU
|
||||
0x3c0000c6: 575, // pt-MO
|
||||
0x3c0000d1: 576, // pt-MZ
|
||||
0x3c0000ee: 577, // pt-PT
|
||||
0x3c000118: 578, // pt-ST
|
||||
0x3c000126: 579, // pt-TL
|
||||
0x3c400000: 580, // qu
|
||||
0x3c40003f: 581, // qu-BO
|
||||
0x3c400069: 582, // qu-EC
|
||||
0x3c4000e4: 583, // qu-PE
|
||||
0x3d400000: 584, // rm
|
||||
0x3d40004e: 585, // rm-CH
|
||||
0x3d900000: 586, // rn
|
||||
0x3d90003a: 587, // rn-BI
|
||||
0x3dc00000: 588, // ro
|
||||
0x3dc000bc: 589, // ro-MD
|
||||
0x3dc00104: 590, // ro-RO
|
||||
0x3de00000: 591, // rof
|
||||
0x3de0012f: 592, // rof-TZ
|
||||
0x3e200000: 593, // ru
|
||||
0x3e200047: 594, // ru-BY
|
||||
0x3e2000a5: 595, // ru-KG
|
||||
0x3e2000ae: 596, // ru-KZ
|
||||
0x3e2000bc: 597, // ru-MD
|
||||
0x3e200106: 598, // ru-RU
|
||||
0x3e200130: 599, // ru-UA
|
||||
0x3e500000: 600, // rw
|
||||
0x3e500107: 601, // rw-RW
|
||||
0x3e600000: 602, // rwk
|
||||
0x3e60012f: 603, // rwk-TZ
|
||||
0x3eb00000: 604, // sah
|
||||
0x3eb00106: 605, // sah-RU
|
||||
0x3ec00000: 606, // saq
|
||||
0x3ec000a4: 607, // saq-KE
|
||||
0x3f300000: 608, // sbp
|
||||
0x3f30012f: 609, // sbp-TZ
|
||||
0x3fa00000: 610, // sd
|
||||
0x3fa000e8: 611, // sd-PK
|
||||
0x3fc00000: 612, // sdh
|
||||
0x3fd00000: 613, // se
|
||||
0x3fd00072: 614, // se-FI
|
||||
0x3fd000da: 615, // se-NO
|
||||
0x3fd0010c: 616, // se-SE
|
||||
0x3ff00000: 617, // seh
|
||||
0x3ff000d1: 618, // seh-MZ
|
||||
0x40100000: 619, // ses
|
||||
0x401000c3: 620, // ses-ML
|
||||
0x40200000: 621, // sg
|
||||
0x4020004c: 622, // sg-CF
|
||||
0x40800000: 623, // shi
|
||||
0x40857000: 624, // shi-Latn
|
||||
0x408570ba: 625, // shi-Latn-MA
|
||||
0x408dc000: 626, // shi-Tfng
|
||||
0x408dc0ba: 627, // shi-Tfng-MA
|
||||
0x40c00000: 628, // si
|
||||
0x40c000b3: 629, // si-LK
|
||||
0x41200000: 630, // sk
|
||||
0x41200111: 631, // sk-SK
|
||||
0x41600000: 632, // sl
|
||||
0x4160010f: 633, // sl-SI
|
||||
0x41c00000: 634, // sma
|
||||
0x41d00000: 635, // smi
|
||||
0x41e00000: 636, // smj
|
||||
0x41f00000: 637, // smn
|
||||
0x41f00072: 638, // smn-FI
|
||||
0x42200000: 639, // sms
|
||||
0x42300000: 640, // sn
|
||||
0x42300164: 641, // sn-ZW
|
||||
0x42900000: 642, // so
|
||||
0x42900062: 643, // so-DJ
|
||||
0x4290006f: 644, // so-ET
|
||||
0x429000a4: 645, // so-KE
|
||||
0x42900115: 646, // so-SO
|
||||
0x43100000: 647, // sq
|
||||
0x43100027: 648, // sq-AL
|
||||
0x431000c2: 649, // sq-MK
|
||||
0x4310014d: 650, // sq-XK
|
||||
0x43200000: 651, // sr
|
||||
0x4321f000: 652, // sr-Cyrl
|
||||
0x4321f033: 653, // sr-Cyrl-BA
|
||||
0x4321f0bd: 654, // sr-Cyrl-ME
|
||||
0x4321f105: 655, // sr-Cyrl-RS
|
||||
0x4321f14d: 656, // sr-Cyrl-XK
|
||||
0x43257000: 657, // sr-Latn
|
||||
0x43257033: 658, // sr-Latn-BA
|
||||
0x432570bd: 659, // sr-Latn-ME
|
||||
0x43257105: 660, // sr-Latn-RS
|
||||
0x4325714d: 661, // sr-Latn-XK
|
||||
0x43700000: 662, // ss
|
||||
0x43a00000: 663, // ssy
|
||||
0x43b00000: 664, // st
|
||||
0x44400000: 665, // sv
|
||||
0x44400031: 666, // sv-AX
|
||||
0x44400072: 667, // sv-FI
|
||||
0x4440010c: 668, // sv-SE
|
||||
0x44500000: 669, // sw
|
||||
0x4450004b: 670, // sw-CD
|
||||
0x445000a4: 671, // sw-KE
|
||||
0x4450012f: 672, // sw-TZ
|
||||
0x44500131: 673, // sw-UG
|
||||
0x44e00000: 674, // syr
|
||||
0x45000000: 675, // ta
|
||||
0x45000099: 676, // ta-IN
|
||||
0x450000b3: 677, // ta-LK
|
||||
0x450000d0: 678, // ta-MY
|
||||
0x4500010d: 679, // ta-SG
|
||||
0x46100000: 680, // te
|
||||
0x46100099: 681, // te-IN
|
||||
0x46400000: 682, // teo
|
||||
0x464000a4: 683, // teo-KE
|
||||
0x46400131: 684, // teo-UG
|
||||
0x46700000: 685, // tg
|
||||
0x46700124: 686, // tg-TJ
|
||||
0x46b00000: 687, // th
|
||||
0x46b00123: 688, // th-TH
|
||||
0x46f00000: 689, // ti
|
||||
0x46f0006d: 690, // ti-ER
|
||||
0x46f0006f: 691, // ti-ET
|
||||
0x47100000: 692, // tig
|
||||
0x47600000: 693, // tk
|
||||
0x47600127: 694, // tk-TM
|
||||
0x48000000: 695, // tn
|
||||
0x48200000: 696, // to
|
||||
0x48200129: 697, // to-TO
|
||||
0x48a00000: 698, // tr
|
||||
0x48a0005d: 699, // tr-CY
|
||||
0x48a0012b: 700, // tr-TR
|
||||
0x48e00000: 701, // ts
|
||||
0x49400000: 702, // tt
|
||||
0x49400106: 703, // tt-RU
|
||||
0x4a400000: 704, // twq
|
||||
0x4a4000d4: 705, // twq-NE
|
||||
0x4a900000: 706, // tzm
|
||||
0x4a9000ba: 707, // tzm-MA
|
||||
0x4ac00000: 708, // ug
|
||||
0x4ac00053: 709, // ug-CN
|
||||
0x4ae00000: 710, // uk
|
||||
0x4ae00130: 711, // uk-UA
|
||||
0x4b400000: 712, // ur
|
||||
0x4b400099: 713, // ur-IN
|
||||
0x4b4000e8: 714, // ur-PK
|
||||
0x4bc00000: 715, // uz
|
||||
0x4bc05000: 716, // uz-Arab
|
||||
0x4bc05024: 717, // uz-Arab-AF
|
||||
0x4bc1f000: 718, // uz-Cyrl
|
||||
0x4bc1f137: 719, // uz-Cyrl-UZ
|
||||
0x4bc57000: 720, // uz-Latn
|
||||
0x4bc57137: 721, // uz-Latn-UZ
|
||||
0x4be00000: 722, // vai
|
||||
0x4be57000: 723, // vai-Latn
|
||||
0x4be570b4: 724, // vai-Latn-LR
|
||||
0x4bee3000: 725, // vai-Vaii
|
||||
0x4bee30b4: 726, // vai-Vaii-LR
|
||||
0x4c000000: 727, // ve
|
||||
0x4c300000: 728, // vi
|
||||
0x4c30013e: 729, // vi-VN
|
||||
0x4c900000: 730, // vo
|
||||
0x4c900001: 731, // vo-001
|
||||
0x4cc00000: 732, // vun
|
||||
0x4cc0012f: 733, // vun-TZ
|
||||
0x4ce00000: 734, // wa
|
||||
0x4cf00000: 735, // wae
|
||||
0x4cf0004e: 736, // wae-CH
|
||||
0x4e500000: 737, // wo
|
||||
0x4e500114: 738, // wo-SN
|
||||
0x4f200000: 739, // xh
|
||||
0x4fb00000: 740, // xog
|
||||
0x4fb00131: 741, // xog-UG
|
||||
0x50900000: 742, // yav
|
||||
0x50900052: 743, // yav-CM
|
||||
0x51200000: 744, // yi
|
||||
0x51200001: 745, // yi-001
|
||||
0x51800000: 746, // yo
|
||||
0x5180003b: 747, // yo-BJ
|
||||
0x518000d6: 748, // yo-NG
|
||||
0x51f00000: 749, // yue
|
||||
0x51f38000: 750, // yue-Hans
|
||||
0x51f38053: 751, // yue-Hans-CN
|
||||
0x51f39000: 752, // yue-Hant
|
||||
0x51f3908d: 753, // yue-Hant-HK
|
||||
0x52800000: 754, // zgh
|
||||
0x528000ba: 755, // zgh-MA
|
||||
0x52900000: 756, // zh
|
||||
0x52938000: 757, // zh-Hans
|
||||
0x52938053: 758, // zh-Hans-CN
|
||||
0x5293808d: 759, // zh-Hans-HK
|
||||
0x529380c6: 760, // zh-Hans-MO
|
||||
0x5293810d: 761, // zh-Hans-SG
|
||||
0x52939000: 762, // zh-Hant
|
||||
0x5293908d: 763, // zh-Hant-HK
|
||||
0x529390c6: 764, // zh-Hant-MO
|
||||
0x5293912e: 765, // zh-Hant-TW
|
||||
0x52f00000: 766, // zu
|
||||
0x52f00161: 767, // zu-ZA
|
||||
}
|
||||
|
||||
// Total table size 4676 bytes (4KiB); checksum: 17BE3673
|
907 vendor/golang.org/x/text/language/language.go (generated, vendored)
@ -1,907 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run gen.go gen_common.go -output tables.go
|
||||
//go:generate go run gen_index.go
|
||||
|
||||
package language
|
||||
|
||||
// TODO: Remove above NOTE after:
|
||||
// - verifying that tables are dropped correctly (most notably matcher tables).
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// maxCoreSize is the maximum size of a BCP 47 tag without variants and
|
||||
// extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes.
|
||||
maxCoreSize = 12
|
||||
|
||||
// max99thPercentileSize is a somewhat arbitrary buffer size that presumably
|
||||
// is large enough to hold at least 99% of the BCP 47 tags.
|
||||
max99thPercentileSize = 32
|
||||
|
||||
// maxSimpleUExtensionSize is the maximum size of a -u extension with one
|
||||
// key-type pair. Equals len("-u-") + key (2) + dash + max value (8).
|
||||
maxSimpleUExtensionSize = 14
|
||||
)
|
||||
|
||||
// Tag represents a BCP 47 language tag. It is used to specify an instance of a
|
||||
// specific language or locale. All language tag values are guaranteed to be
|
||||
// well-formed.
|
||||
type Tag struct {
|
||||
lang langID
|
||||
region regionID
|
||||
// TODO: we will soon run out of positions for script. Idea: instead of
|
||||
// storing lang, region, and script codes, store only the compact index and
|
||||
// have a lookup table from this code to its expansion. This greatly speeds
|
||||
// up table lookup, speed up common variant cases.
|
||||
// This will also immediately free up 3 extra bytes. Also, the pVariant
|
||||
// field can now be moved to the lookup table, as the compact index uniquely
|
||||
// determines the offset of a possible variant.
|
||||
script scriptID
|
||||
pVariant byte // offset in str, includes preceding '-'
|
||||
pExt uint16 // offset of first extension, includes preceding '-'
|
||||
|
||||
// str is the string representation of the Tag. It will only be used if the
|
||||
// tag has variants or extensions.
|
||||
str string
|
||||
}
|
||||
|
||||
// Make is a convenience wrapper for Parse that omits the error.
|
||||
// In case of an error, a sensible default is returned.
|
||||
func Make(s string) Tag {
|
||||
return Default.Make(s)
|
||||
}
|
||||
|
||||
// Make is a convenience wrapper for c.Parse that omits the error.
|
||||
// In case of an error, a sensible default is returned.
|
||||
func (c CanonType) Make(s string) Tag {
|
||||
t, _ := c.Parse(s)
|
||||
return t
|
||||
}
|
||||
|
||||
// Raw returns the raw base language, script and region, without making an
|
||||
// attempt to infer their values.
|
||||
func (t Tag) Raw() (b Base, s Script, r Region) {
|
||||
return Base{t.lang}, Script{t.script}, Region{t.region}
|
||||
}
|
||||
|
||||
// equalTags compares language, script and region subtags only.
|
||||
func (t Tag) equalTags(a Tag) bool {
|
||||
return t.lang == a.lang && t.script == a.script && t.region == a.region
|
||||
}
|
||||
|
||||
// IsRoot returns true if t is equal to language "und".
|
||||
func (t Tag) IsRoot() bool {
|
||||
if int(t.pVariant) < len(t.str) {
|
||||
return false
|
||||
}
|
||||
return t.equalTags(und)
|
||||
}
|
||||
|
||||
// private reports whether the Tag consists solely of a private use tag.
|
||||
func (t Tag) private() bool {
|
||||
return t.str != "" && t.pVariant == 0
|
||||
}
|
||||
|
||||
// CanonType can be used to enable or disable various types of canonicalization.
|
||||
type CanonType int
|
||||
|
||||
const (
|
||||
// Replace deprecated base languages with their preferred replacements.
|
||||
DeprecatedBase CanonType = 1 << iota
|
||||
// Replace deprecated scripts with their preferred replacements.
|
||||
DeprecatedScript
|
||||
// Replace deprecated regions with their preferred replacements.
|
||||
DeprecatedRegion
|
||||
// Remove redundant scripts.
|
||||
SuppressScript
|
||||
// Normalize legacy encodings. This includes legacy languages defined in
|
||||
// CLDR as well as bibliographic codes defined in ISO-639.
|
||||
Legacy
|
||||
// Map the dominant language of a macro language group to the macro language
|
||||
// subtag. For example cmn -> zh.
|
||||
Macro
|
||||
// The CLDR flag should be used if full compatibility with CLDR is required.
|
||||
// There are a few cases where language.Tag may differ from CLDR. To follow all
|
||||
// of CLDR's suggestions, use All|CLDR.
|
||||
CLDR
|
||||
|
||||
// Raw can be used to Compose or Parse without Canonicalization.
|
||||
Raw CanonType = 0
|
||||
|
||||
// Replace all deprecated tags with their preferred replacements.
|
||||
Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion
|
||||
|
||||
// All canonicalizations recommended by BCP 47.
|
||||
BCP47 = Deprecated | SuppressScript
|
||||
|
||||
// All canonicalizations.
|
||||
All = BCP47 | Legacy | Macro
|
||||
|
||||
// Default is the canonicalization used by Parse, Make and Compose. To
|
||||
// preserve as much information as possible, canonicalizations that remove
|
||||
// potentially valuable information are not included. The Matcher is
|
||||
// designed to recognize similar tags that would be the same if
|
||||
// they were canonicalized using All.
|
||||
Default = Deprecated | Legacy
|
||||
|
||||
canonLang = DeprecatedBase | Legacy | Macro
|
||||
|
||||
// TODO: LikelyScript, LikelyRegion: suppress similar to ICU.
|
||||
)
|
||||
|
||||
// canonicalize returns the canonicalized equivalent of the tag and
|
||||
// whether there was any change.
|
||||
func (t Tag) canonicalize(c CanonType) (Tag, bool) {
|
||||
if c == Raw {
|
||||
return t, false
|
||||
}
|
||||
changed := false
|
||||
if c&SuppressScript != 0 {
|
||||
if t.lang < langNoIndexOffset && uint8(t.script) == suppressScript[t.lang] {
|
||||
t.script = 0
|
||||
changed = true
|
||||
}
|
||||
}
|
||||
if c&canonLang != 0 {
|
||||
for {
|
||||
if l, aliasType := normLang(t.lang); l != t.lang {
|
||||
switch aliasType {
|
||||
case langLegacy:
|
||||
if c&Legacy != 0 {
|
||||
if t.lang == _sh && t.script == 0 {
|
||||
t.script = _Latn
|
||||
}
|
||||
t.lang = l
|
||||
changed = true
|
||||
}
|
||||
case langMacro:
|
||||
if c&Macro != 0 {
|
||||
// We deviate here from CLDR. The mapping "nb" -> "no"
|
||||
// qualifies as a typical Macro language mapping. However,
|
||||
// for legacy reasons, CLDR maps "no", the macro language
|
||||
// code for Norwegian, to the dominant variant "nb". This
|
||||
// change is currently under consideration for CLDR as well.
|
||||
// See http://unicode.org/cldr/trac/ticket/2698 and also
|
||||
// http://unicode.org/cldr/trac/ticket/1790 for some of the
|
||||
// practical implications. TODO: this check could be removed
|
||||
// if CLDR adopts this change.
|
||||
if c&CLDR == 0 || t.lang != _nb {
|
||||
changed = true
|
||||
t.lang = l
|
||||
}
|
||||
}
|
||||
case langDeprecated:
|
||||
if c&DeprecatedBase != 0 {
|
||||
if t.lang == _mo && t.region == 0 {
|
||||
t.region = _MD
|
||||
}
|
||||
t.lang = l
|
||||
changed = true
|
||||
// Other canonicalization types may still apply.
|
||||
continue
|
||||
}
|
||||
}
|
||||
} else if c&Legacy != 0 && t.lang == _no && c&CLDR != 0 {
|
||||
t.lang = _nb
|
||||
changed = true
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if c&DeprecatedScript != 0 {
|
||||
if t.script == _Qaai {
|
||||
changed = true
|
||||
t.script = _Zinh
|
||||
}
|
||||
}
|
||||
if c&DeprecatedRegion != 0 {
|
||||
if r := normRegion(t.region); r != 0 {
|
||||
changed = true
|
||||
t.region = r
|
||||
}
|
||||
}
|
||||
return t, changed
|
||||
}
|
||||
|
||||
// Canonicalize returns the canonicalized equivalent of the tag.
|
||||
func (c CanonType) Canonicalize(t Tag) (Tag, error) {
|
||||
t, changed := t.canonicalize(c)
|
||||
if changed {
|
||||
t.remakeString()
|
||||
}
|
||||
return t, nil
|
||||
}
|
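The CanonType constants and the Canonicalize method above control how aggressively a Tag is rewritten. A minimal sketch follows; the input tag is chosen purely for illustration.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Parse without any canonicalization, then apply all of them;
	// the Macro rule maps the dominant language "cmn" to the macro code "zh".
	raw, _ := language.Raw.Parse("cmn-Hans-CN")
	canon, _ := language.All.Canonicalize(raw)
	fmt.Println(raw, canon) // cmn-Hans-CN zh-Hans-CN
}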
||||
|
||||
// Confidence indicates the level of certainty for a given return value.
|
||||
// For example, Serbian may be written in Cyrillic or Latin script.
|
||||
// The confidence level indicates whether a value was explicitly specified,
|
||||
// whether it is typically the only possible value, or whether there is
|
||||
// an ambiguity.
|
||||
type Confidence int
|
||||
|
||||
const (
|
||||
No Confidence = iota // full confidence that there was no match
|
||||
Low // most likely value picked out of a set of alternatives
|
||||
High // value is generally assumed to be the correct match
|
||||
Exact // exact match or explicitly specified value
|
||||
)
|
||||
|
||||
var confName = []string{"No", "Low", "High", "Exact"}
|
||||
|
||||
func (c Confidence) String() string {
|
||||
return confName[c]
|
||||
}
|
||||
|
||||
// remakeString is used to update t.str in case lang, script or region changed.
|
||||
// It is assumed that pExt and pVariant still point to the start of the
|
||||
// respective parts.
|
||||
func (t *Tag) remakeString() {
|
||||
if t.str == "" {
|
||||
return
|
||||
}
|
||||
extra := t.str[t.pVariant:]
|
||||
if t.pVariant > 0 {
|
||||
extra = extra[1:]
|
||||
}
|
||||
if t.equalTags(und) && strings.HasPrefix(extra, "x-") {
|
||||
t.str = extra
|
||||
t.pVariant = 0
|
||||
t.pExt = 0
|
||||
return
|
||||
}
|
||||
var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases.
|
||||
b := buf[:t.genCoreBytes(buf[:])]
|
||||
if extra != "" {
|
||||
diff := len(b) - int(t.pVariant)
|
||||
b = append(b, '-')
|
||||
b = append(b, extra...)
|
||||
t.pVariant = uint8(int(t.pVariant) + diff)
|
||||
t.pExt = uint16(int(t.pExt) + diff)
|
||||
} else {
|
||||
t.pVariant = uint8(len(b))
|
||||
t.pExt = uint16(len(b))
|
||||
}
|
||||
t.str = string(b)
|
||||
}
|
||||
|
||||
// genCoreBytes writes a string for the base languages, script and region tags
|
||||
// to the given buffer and returns the number of bytes written. It will never
|
||||
// write more than maxCoreSize bytes.
|
||||
func (t *Tag) genCoreBytes(buf []byte) int {
|
||||
n := t.lang.stringToBuf(buf[:])
|
||||
if t.script != 0 {
|
||||
n += copy(buf[n:], "-")
|
||||
n += copy(buf[n:], t.script.String())
|
||||
}
|
||||
if t.region != 0 {
|
||||
n += copy(buf[n:], "-")
|
||||
n += copy(buf[n:], t.region.String())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// String returns the canonical string representation of the language tag.
|
||||
func (t Tag) String() string {
|
||||
if t.str != "" {
|
||||
return t.str
|
||||
}
|
||||
if t.script == 0 && t.region == 0 {
|
||||
return t.lang.String()
|
||||
}
|
||||
buf := [maxCoreSize]byte{}
|
||||
return string(buf[:t.genCoreBytes(buf[:])])
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (t Tag) MarshalText() (text []byte, err error) {
|
||||
if t.str != "" {
|
||||
text = append(text, t.str...)
|
||||
} else if t.script == 0 && t.region == 0 {
|
||||
text = append(text, t.lang.String()...)
|
||||
} else {
|
||||
buf := [maxCoreSize]byte{}
|
||||
text = buf[:t.genCoreBytes(buf[:])]
|
||||
}
|
||||
return text, nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (t *Tag) UnmarshalText(text []byte) error {
|
||||
tag, err := Raw.Parse(string(text))
|
||||
*t = tag
|
||||
return err
|
||||
}
|
||||
|
||||
// Base returns the base language of the language tag. If the base language is
|
||||
// unspecified, an attempt will be made to infer it from the context.
|
||||
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
|
||||
func (t Tag) Base() (Base, Confidence) {
|
||||
if t.lang != 0 {
|
||||
return Base{t.lang}, Exact
|
||||
}
|
||||
c := High
|
||||
if t.script == 0 && !(Region{t.region}).IsCountry() {
|
||||
c = Low
|
||||
}
|
||||
if tag, err := addTags(t); err == nil && tag.lang != 0 {
|
||||
return Base{tag.lang}, c
|
||||
}
|
||||
return Base{0}, No
|
||||
}
|
||||
|
||||
// Script infers the script for the language tag. If it was not explicitly given, it will infer
|
||||
// a most likely candidate.
|
||||
// If more than one script is commonly used for a language, the most likely one
|
||||
// is returned with a low confidence indication. For example, it returns (Cyrl, Low)
|
||||
// for Serbian.
|
||||
// If a script cannot be inferred (Zzzz, No) is returned. We do not use Zyyy (undetermined)
|
||||
// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks
|
||||
// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts.
|
||||
// See http://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for
|
||||
// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified.
|
||||
// Note that an inferred script is never guaranteed to be the correct one. Latin is
|
||||
// almost exclusively used for Afrikaans, but Arabic has been used for some texts
|
||||
// in the past. Also, the script that is commonly used may change over time.
|
||||
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
|
||||
func (t Tag) Script() (Script, Confidence) {
|
||||
if t.script != 0 {
|
||||
return Script{t.script}, Exact
|
||||
}
|
||||
sc, c := scriptID(_Zzzz), No
|
||||
if t.lang < langNoIndexOffset {
|
||||
if scr := scriptID(suppressScript[t.lang]); scr != 0 {
|
||||
// Note: it is not always the case that a language with a suppress
|
||||
// script value is only written in one script (e.g. kk, ms, pa).
|
||||
if t.region == 0 {
|
||||
return Script{scriptID(scr)}, High
|
||||
}
|
||||
sc, c = scr, High
|
||||
}
|
||||
}
|
||||
if tag, err := addTags(t); err == nil {
|
||||
if tag.script != sc {
|
||||
sc, c = tag.script, Low
|
||||
}
|
||||
} else {
|
||||
t, _ = (Deprecated | Macro).Canonicalize(t)
|
||||
if tag, err := addTags(t); err == nil && tag.script != sc {
|
||||
sc, c = tag.script, Low
|
||||
}
|
||||
}
|
||||
return Script{sc}, c
|
||||
}
|
||||
|
||||
// Region returns the region for the language tag. If it was not explicitly given, it will
|
||||
// infer a most likely candidate from the context.
|
||||
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
|
||||
func (t Tag) Region() (Region, Confidence) {
|
||||
if t.region != 0 {
|
||||
return Region{t.region}, Exact
|
||||
}
|
||||
if t, err := addTags(t); err == nil {
|
||||
return Region{t.region}, Low // TODO: differentiate between high and low.
|
||||
}
|
||||
t, _ = (Deprecated | Macro).Canonicalize(t)
|
||||
if tag, err := addTags(t); err == nil {
|
||||
return Region{tag.region}, Low
|
||||
}
|
||||
return Region{_ZZ}, No // TODO: return world instead of undetermined?
|
||||
}
|
||||
|
||||
// Variant returns the variants specified explicitly for this language tag.
|
||||
// or nil if no variant was specified.
|
||||
func (t Tag) Variants() []Variant {
|
||||
v := []Variant{}
|
||||
if int(t.pVariant) < int(t.pExt) {
|
||||
for x, str := "", t.str[t.pVariant:t.pExt]; str != ""; {
|
||||
x, str = nextToken(str)
|
||||
v = append(v, Variant{x})
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
|
||||
// specific language are substituted with fields from the parent language.
|
||||
// The parent for a language may change for newer versions of CLDR.
|
||||
func (t Tag) Parent() Tag {
|
||||
if t.str != "" {
|
||||
// Strip the variants and extensions.
|
||||
t, _ = Raw.Compose(t.Raw())
|
||||
if t.region == 0 && t.script != 0 && t.lang != 0 {
|
||||
base, _ := addTags(Tag{lang: t.lang})
|
||||
if base.script == t.script {
|
||||
return Tag{lang: t.lang}
|
||||
}
|
||||
}
|
||||
return t
|
||||
}
|
||||
if t.lang != 0 {
|
||||
if t.region != 0 {
|
||||
maxScript := t.script
|
||||
if maxScript == 0 {
|
||||
max, _ := addTags(t)
|
||||
maxScript = max.script
|
||||
}
|
||||
|
||||
for i := range parents {
|
||||
if langID(parents[i].lang) == t.lang && scriptID(parents[i].maxScript) == maxScript {
|
||||
for _, r := range parents[i].fromRegion {
|
||||
if regionID(r) == t.region {
|
||||
return Tag{
|
||||
lang: t.lang,
|
||||
script: scriptID(parents[i].script),
|
||||
region: regionID(parents[i].toRegion),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Strip the script if it is the default one.
|
||||
base, _ := addTags(Tag{lang: t.lang})
|
||||
if base.script != maxScript {
|
||||
return Tag{lang: t.lang, script: maxScript}
|
||||
}
|
||||
return Tag{lang: t.lang}
|
||||
} else if t.script != 0 {
|
||||
// The parent for an base-script pair with a non-default script is
|
||||
// "und" instead of the base language.
|
||||
base, _ := addTags(Tag{lang: t.lang})
|
||||
if base.script != t.script {
|
||||
return und
|
||||
}
|
||||
return Tag{lang: t.lang}
|
||||
}
|
||||
}
|
||||
return und
|
||||
}
|
||||
|
||||
// returns token t and the rest of the string.
|
||||
func nextToken(s string) (t, tail string) {
|
||||
p := strings.Index(s[1:], "-")
|
||||
if p == -1 {
|
||||
return s[1:], ""
|
||||
}
|
||||
p++
|
||||
return s[1:p], s[p:]
|
||||
}
|
||||
|
||||
// Extension is a single BCP 47 extension.
|
||||
type Extension struct {
|
||||
s string
|
||||
}
|
||||
|
||||
// String returns the string representation of the extension, including the
|
||||
// type tag.
|
||||
func (e Extension) String() string {
|
||||
return e.s
|
||||
}
|
||||
|
||||
// ParseExtension parses s as an extension and returns it on success.
|
||||
func ParseExtension(s string) (e Extension, err error) {
|
||||
scan := makeScannerString(s)
|
||||
var end int
|
||||
if n := len(scan.token); n != 1 {
|
||||
return Extension{}, errSyntax
|
||||
}
|
||||
scan.toLower(0, len(scan.b))
|
||||
end = parseExtension(&scan)
|
||||
if end != len(s) {
|
||||
return Extension{}, errSyntax
|
||||
}
|
||||
return Extension{string(scan.b)}, nil
|
||||
}
|
||||
|
||||
// Type returns the one-byte extension type of e. It returns 0 for the zero
|
||||
// exception.
|
||||
func (e Extension) Type() byte {
|
||||
if e.s == "" {
|
||||
return 0
|
||||
}
|
||||
return e.s[0]
|
||||
}
|
||||
|
||||
// Tokens returns the list of tokens of e.
|
||||
func (e Extension) Tokens() []string {
|
||||
return strings.Split(e.s, "-")
|
||||
}
|
||||
|
||||
// Extension returns the extension of type x for tag t. It will return
|
||||
// false for ok if t does not have the requested extension. The returned
|
||||
// extension will be invalid in this case.
|
||||
func (t Tag) Extension(x byte) (ext Extension, ok bool) {
|
||||
for i := int(t.pExt); i < len(t.str)-1; {
|
||||
var ext string
|
||||
i, ext = getExtension(t.str, i)
|
||||
if ext[0] == x {
|
||||
return Extension{ext}, true
|
||||
}
|
||||
}
|
||||
return Extension{}, false
|
||||
}
|
||||
|
||||
// Extensions returns all extensions of t.
|
||||
func (t Tag) Extensions() []Extension {
|
||||
e := []Extension{}
|
||||
for i := int(t.pExt); i < len(t.str)-1; {
|
||||
var ext string
|
||||
i, ext = getExtension(t.str, i)
|
||||
e = append(e, Extension{ext})
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// TypeForKey returns the type associated with the given key, where key and type
|
||||
// are of the allowed values defined for the Unicode locale extension ('u') in
|
||||
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
|
||||
// TypeForKey will traverse the inheritance chain to get the correct value.
|
||||
func (t Tag) TypeForKey(key string) string {
|
||||
if start, end, _ := t.findTypeForKey(key); end != start {
|
||||
return t.str[start:end]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var (
|
||||
errPrivateUse = errors.New("cannot set a key on a private use tag")
|
||||
errInvalidArguments = errors.New("invalid key or type")
|
||||
)
|
||||
|
||||
// SetTypeForKey returns a new Tag with the key set to type, where key and type
|
||||
// are of the allowed values defined for the Unicode locale extension ('u') in
|
||||
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
|
||||
// An empty value removes an existing pair with the same key.
|
||||
func (t Tag) SetTypeForKey(key, value string) (Tag, error) {
|
||||
if t.private() {
|
||||
return t, errPrivateUse
|
||||
}
|
||||
if len(key) != 2 {
|
||||
return t, errInvalidArguments
|
||||
}
|
||||
|
||||
// Remove the setting if value is "".
|
||||
if value == "" {
|
||||
start, end, _ := t.findTypeForKey(key)
|
||||
if start != end {
|
||||
// Remove key tag and leading '-'.
|
||||
start -= 4
|
||||
|
||||
// Remove a possible empty extension.
|
||||
if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' {
|
||||
start -= 2
|
||||
}
|
||||
if start == int(t.pVariant) && end == len(t.str) {
|
||||
t.str = ""
|
||||
t.pVariant, t.pExt = 0, 0
|
||||
} else {
|
||||
t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:])
|
||||
}
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
if len(value) < 3 || len(value) > 8 {
|
||||
return t, errInvalidArguments
|
||||
}
|
||||
|
||||
var (
|
||||
buf [maxCoreSize + maxSimpleUExtensionSize]byte
|
||||
uStart int // start of the -u extension.
|
||||
)
|
||||
|
||||
// Generate the tag string if needed.
|
||||
if t.str == "" {
|
||||
uStart = t.genCoreBytes(buf[:])
|
||||
buf[uStart] = '-'
|
||||
uStart++
|
||||
}
|
||||
|
||||
// Create new key-type pair and parse it to verify.
|
||||
b := buf[uStart:]
|
||||
copy(b, "u-")
|
||||
copy(b[2:], key)
|
||||
b[4] = '-'
|
||||
b = b[:5+copy(b[5:], value)]
|
||||
scan := makeScanner(b)
|
||||
if parseExtensions(&scan); scan.err != nil {
|
||||
return t, scan.err
|
||||
}
|
||||
|
||||
// Assemble the replacement string.
|
||||
if t.str == "" {
|
||||
t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1)
|
||||
t.str = string(buf[:uStart+len(b)])
|
||||
} else {
|
||||
s := t.str
|
||||
start, end, hasExt := t.findTypeForKey(key)
|
||||
if start == end {
|
||||
if hasExt {
|
||||
b = b[2:]
|
||||
}
|
||||
t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:])
|
||||
} else {
|
||||
t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:])
|
||||
}
|
||||
}
|
||||
return t, nil
|
||||
}
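// Illustrative sketch, not part of the original file: reading and setting a
// Unicode locale key through the public API; the tag literal is an assumed example.
//
//	t := language.MustParse("de-DE-u-co-phonebk")
//	_ = t.TypeForKey("co")               // "phonebk"
//	t, _ = t.SetTypeForKey("nu", "latn") // de-DE-u-co-phonebk-nu-latn
//	t, _ = t.SetTypeForKey("co", "")     // removes the co key again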
|
||||
|
||||
// findTypeForKey returns the start and end position for the type corresponding
|
||||
// to key or the point at which to insert the key-value pair if the type
|
||||
// wasn't found. The hasExt return value reports whether an -u extension was present.
|
||||
// Note: the extensions are typically very small and are likely to contain
|
||||
// only one key-type pair.
|
||||
func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) {
|
||||
p := int(t.pExt)
|
||||
if len(key) != 2 || p == len(t.str) || p == 0 {
|
||||
return p, p, false
|
||||
}
|
||||
s := t.str
|
||||
|
||||
// Find the correct extension.
|
||||
for p++; s[p] != 'u'; p++ {
|
||||
if s[p] > 'u' {
|
||||
p--
|
||||
return p, p, false
|
||||
}
|
||||
if p = nextExtension(s, p); p == len(s) {
|
||||
return len(s), len(s), false
|
||||
}
|
||||
}
|
||||
// Proceed to the hyphen following the extension name.
|
||||
p++
|
||||
|
||||
// curKey is the key currently being processed.
|
||||
curKey := ""
|
||||
|
||||
// Iterate over keys until we get the end of a section.
|
||||
for {
|
||||
// p points to the hyphen preceding the current token.
|
||||
if p3 := p + 3; s[p3] == '-' {
|
||||
// Found a key.
|
||||
// Check whether we just processed the key that was requested.
|
||||
if curKey == key {
|
||||
return start, p, true
|
||||
}
|
||||
// Set to the next key and continue scanning type tokens.
|
||||
curKey = s[p+1 : p3]
|
||||
if curKey > key {
|
||||
return p, p, true
|
||||
}
|
||||
// Start of the type token sequence.
|
||||
start = p + 4
|
||||
// A type is at least 3 characters long.
|
||||
p += 7 // 4 + 3
|
||||
} else {
|
||||
// Attribute or type, which is at least 3 characters long.
|
||||
p += 4
|
||||
}
|
||||
// p points past the third character of a type or attribute.
|
||||
max := p + 5 // maximum length of token plus hyphen.
|
||||
if len(s) < max {
|
||||
max = len(s)
|
||||
}
|
||||
for ; p < max && s[p] != '-'; p++ {
|
||||
}
|
||||
// Bail if we have exhausted all tokens or if the next token starts
|
||||
// a new extension.
|
||||
if p == len(s) || s[p+2] == '-' {
|
||||
if curKey == key {
|
||||
return start, p, true
|
||||
}
|
||||
return p, p, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags
|
||||
// for which data exists in the text repository. The index will change over time
|
||||
// and should not be stored in persistent storage. Extensions, except for the
|
||||
// 'va' type of the 'u' extension, are ignored. It will return 0, false if no
|
||||
// compact tag exists, where 0 is the index for the root language (Und).
|
||||
func CompactIndex(t Tag) (index int, ok bool) {
|
||||
// TODO: perhaps give more frequent tags a lower index.
|
||||
// TODO: we could make the indexes stable. This will exclude some
|
||||
// possibilities for optimization, so don't do this quite yet.
|
||||
b, s, r := t.Raw()
|
||||
if len(t.str) > 0 {
|
||||
if strings.HasPrefix(t.str, "x-") {
|
||||
// We have no entries for user-defined tags.
|
||||
return 0, false
|
||||
}
|
||||
if uint16(t.pVariant) != t.pExt {
|
||||
// There are no tags with variants and a u-va type.
|
||||
if t.TypeForKey("va") != "" {
|
||||
return 0, false
|
||||
}
|
||||
t, _ = Raw.Compose(b, s, r, t.Variants())
|
||||
} else if _, ok := t.Extension('u'); ok {
|
||||
// Strip all but the 'va' entry.
|
||||
variant := t.TypeForKey("va")
|
||||
t, _ = Raw.Compose(b, s, r)
|
||||
t, _ = t.SetTypeForKey("va", variant)
|
||||
}
|
||||
if len(t.str) > 0 {
|
||||
// We have some variants.
|
||||
for i, s := range specialTags {
|
||||
if s == t {
|
||||
return i + 1, true
|
||||
}
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
// No variants specified: just compare core components.
|
||||
// The key has the form lllssrrr, where l, s, and r are nibbles for
|
||||
// respectively the langID, scriptID, and regionID.
|
||||
key := uint32(b.langID) << (8 + 12)
|
||||
key |= uint32(s.scriptID) << 12
|
||||
key |= uint32(r.regionID)
|
||||
x, ok := coreTags[key]
|
||||
return int(x), ok
|
||||
}
|
||||
|
||||
// Base is an ISO 639 language code, used for encoding the base language
|
||||
// of a language tag.
|
||||
type Base struct {
|
||||
langID
|
||||
}
|
||||
|
||||
// ParseBase parses a 2- or 3-letter ISO 639 code.
|
||||
// It returns a ValueError if s is a well-formed but unknown language identifier
|
||||
// or another error if another error occurred.
|
||||
func ParseBase(s string) (Base, error) {
|
||||
if n := len(s); n < 2 || 3 < n {
|
||||
return Base{}, errSyntax
|
||||
}
|
||||
var buf [3]byte
|
||||
l, err := getLangID(buf[:copy(buf[:], s)])
|
||||
return Base{l}, err
|
||||
}
|
||||
|
||||
// Script is a 4-letter ISO 15924 code for representing scripts.
|
||||
// It is idiomatically represented in title case.
|
||||
type Script struct {
|
||||
scriptID
|
||||
}
|
||||
|
||||
// ParseScript parses a 4-letter ISO 15924 code.
|
||||
// It returns a ValueError if s is a well-formed but unknown script identifier
|
||||
// or another error if another error occurred.
|
||||
func ParseScript(s string) (Script, error) {
|
||||
if len(s) != 4 {
|
||||
return Script{}, errSyntax
|
||||
}
|
||||
var buf [4]byte
|
||||
sc, err := getScriptID(script, buf[:copy(buf[:], s)])
|
||||
return Script{sc}, err
|
||||
}
|
||||
|
||||
// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions.
|
||||
type Region struct {
|
||||
regionID
|
||||
}
|
||||
|
||||
// EncodeM49 returns the Region for the given UN M.49 code.
|
||||
// It returns an error if r is not a valid code.
|
||||
func EncodeM49(r int) (Region, error) {
|
||||
rid, err := getRegionM49(r)
|
||||
return Region{rid}, err
|
||||
}
|
||||
|
||||
// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
|
||||
// It returns a ValueError if s is a well-formed but unknown region identifier
|
||||
// or another error if another error occurred.
|
||||
func ParseRegion(s string) (Region, error) {
|
||||
if n := len(s); n < 2 || 3 < n {
|
||||
return Region{}, errSyntax
|
||||
}
|
||||
var buf [3]byte
|
||||
r, err := getRegionID(buf[:copy(buf[:], s)])
|
||||
return Region{r}, err
|
||||
}
|
||||
|
||||
// IsCountry returns whether this region is a country or autonomous area. This
|
||||
// includes non-standard definitions from CLDR.
|
||||
func (r Region) IsCountry() bool {
|
||||
if r.regionID == 0 || r.IsGroup() || r.IsPrivateUse() && r.regionID != _XK {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// IsGroup returns whether this region defines a collection of regions. This
|
||||
// includes non-standard definitions from CLDR.
|
||||
func (r Region) IsGroup() bool {
|
||||
if r.regionID == 0 {
|
||||
return false
|
||||
}
|
||||
return int(regionInclusion[r.regionID]) < len(regionContainment)
|
||||
}
|
||||
|
||||
// Contains returns whether Region c is contained by Region r. It returns true
|
||||
// if c == r.
|
||||
func (r Region) Contains(c Region) bool {
|
||||
return r.regionID.contains(c.regionID)
|
||||
}
|
||||
|
||||
func (r regionID) contains(c regionID) bool {
|
||||
if r == c {
|
||||
return true
|
||||
}
|
||||
g := regionInclusion[r]
|
||||
if g >= nRegionGroups {
|
||||
return false
|
||||
}
|
||||
m := regionContainment[g]
|
||||
|
||||
d := regionInclusion[c]
|
||||
b := regionInclusionBits[d]
|
||||
|
||||
// A contained country may belong to multiple disjoint groups. Matching any
|
||||
// of these indicates containment. If the contained region is a group, it
|
||||
// must strictly be a subset.
|
||||
if d >= nRegionGroups {
|
||||
return b&m != 0
|
||||
}
|
||||
return b&^m == 0
|
||||
}
|
||||
|
||||
var errNoTLD = errors.New("language: region is not a valid ccTLD")
|
||||
|
||||
// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
|
||||
// In all other cases it returns either the region itself or an error.
|
||||
//
|
||||
// This method may return an error for a region for which there exists a
|
||||
// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The
|
||||
// region will already be canonicalized if it was obtained from a Tag that was
|
||||
// obtained using any of the default methods.
|
||||
func (r Region) TLD() (Region, error) {
|
||||
// See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the
|
||||
// difference between ISO 3166-1 and IANA ccTLD.
|
||||
if r.regionID == _GB {
|
||||
r = Region{_UK}
|
||||
}
|
||||
if (r.typ() & ccTLD) == 0 {
|
||||
return Region{}, errNoTLD
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Canonicalize returns the region or a possible replacement if the region is
|
||||
// deprecated. It will not return a replacement for deprecated regions that
|
||||
// are split into multiple regions.
|
||||
func (r Region) Canonicalize() Region {
|
||||
if cr := normRegion(r.regionID); cr != 0 {
|
||||
return Region{cr}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// Variant represents a registered variant of a language as defined by BCP 47.
|
||||
type Variant struct {
|
||||
variant string
|
||||
}
|
||||
|
||||
// ParseVariant parses and returns a Variant. An error is returned if s is not
|
||||
// a valid variant.
|
||||
func ParseVariant(s string) (Variant, error) {
|
||||
s = strings.ToLower(s)
|
||||
if _, ok := variantIndex[s]; ok {
|
||||
return Variant{s}, nil
|
||||
}
|
||||
return Variant{}, mkErrInvalid([]byte(s))
|
||||
}
|
||||
|
||||
// String returns the string representation of the variant.
|
||||
func (v Variant) String() string {
|
||||
return v.variant
|
||||
}
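// Illustrative sketch, not part of the original file: parsing the individual
// subtag types defined above; the literal inputs are assumed examples.
//
//	b, _ := language.ParseBase("de")      // Base "de"
//	s, _ := language.ParseScript("Latn")  // Script "Latn"
//	r, _ := language.ParseRegion("DE")    // Region "DE"
//	m, _ := language.EncodeM49(276)       // also Region "DE", from its UN M.49 code
//	v, _ := language.ParseVariant("1901") // a registered variant
//	_, _, _, _, _ = b, s, r, m, v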
|
396
vendor/golang.org/x/text/language/lookup.go
generated
vendored
396
vendor/golang.org/x/text/language/lookup.go
generated
vendored
@ -1,396 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/text/internal/tag"
|
||||
)
|
||||
|
||||
// findIndex tries to find the given tag in idx and returns a standardized error
|
||||
// if it could not be found.
|
||||
func findIndex(idx tag.Index, key []byte, form string) (index int, err error) {
|
||||
if !tag.FixCase(form, key) {
|
||||
return 0, errSyntax
|
||||
}
|
||||
i := idx.Index(key)
|
||||
if i == -1 {
|
||||
return 0, mkErrInvalid(key)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func searchUint(imap []uint16, key uint16) int {
|
||||
return sort.Search(len(imap), func(i int) bool {
|
||||
return imap[i] >= key
|
||||
})
|
||||
}
|
||||
|
||||
type langID uint16
|
||||
|
||||
// getLangID returns the langID of s if s is a canonical subtag
|
||||
// or langUnknown if s is not a canonical subtag.
|
||||
func getLangID(s []byte) (langID, error) {
|
||||
if len(s) == 2 {
|
||||
return getLangISO2(s)
|
||||
}
|
||||
return getLangISO3(s)
|
||||
}
|
||||
|
||||
// normLang returns the mapped langID of id according to the language alias mapping.
|
||||
func normLang(id langID) (langID, langAliasType) {
|
||||
k := sort.Search(len(langAliasMap), func(i int) bool {
|
||||
return langAliasMap[i].from >= uint16(id)
|
||||
})
|
||||
if k < len(langAliasMap) && langAliasMap[k].from == uint16(id) {
|
||||
return langID(langAliasMap[k].to), langAliasTypes[k]
|
||||
}
|
||||
return id, langAliasTypeUnknown
|
||||
}
|
||||
|
||||
// getLangISO2 returns the langID for the given 2-letter ISO language code
|
||||
// or unknownLang if this does not exist.
|
||||
func getLangISO2(s []byte) (langID, error) {
|
||||
if !tag.FixCase("zz", s) {
|
||||
return 0, errSyntax
|
||||
}
|
||||
if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 {
|
||||
return langID(i), nil
|
||||
}
|
||||
return 0, mkErrInvalid(s)
|
||||
}
|
||||
|
||||
const base = 'z' - 'a' + 1
|
||||
|
||||
func strToInt(s []byte) uint {
|
||||
v := uint(0)
|
||||
for i := 0; i < len(s); i++ {
|
||||
v *= base
|
||||
v += uint(s[i] - 'a')
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// converts the given integer to the original ASCII string passed to strToInt.
|
||||
// len(s) must match the number of characters obtained.
|
||||
func intToStr(v uint, s []byte) {
|
||||
for i := len(s) - 1; i >= 0; i-- {
|
||||
s[i] = byte(v%base) + 'a'
|
||||
v /= base
|
||||
}
|
||||
}
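// Illustrative sketch, not part of the original file: strToInt and intToStr are
// inverses over lowercase ASCII, which is how 3-letter codes without an index
// entry are packed into a langID.
//
//	n := strToInt([]byte("und")) // 20*26*26 + 13*26 + 3 == 13861
//	buf := make([]byte, 3)
//	intToStr(n, buf)             // buf now holds "und" again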
|
||||
|
||||
// getLangISO3 returns the langID for the given 3-letter ISO language code
|
||||
// or unknownLang if this does not exist.
|
||||
func getLangISO3(s []byte) (langID, error) {
|
||||
if tag.FixCase("und", s) {
|
||||
// first try to match canonical 3-letter entries
|
||||
for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) {
|
||||
if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] {
|
||||
// We treat "und" as special and always translate it to "unspecified".
|
||||
// Note that ZZ and Zzzz are private use and are not treated as
|
||||
// unspecified by default.
|
||||
id := langID(i)
|
||||
if id == nonCanonicalUnd {
|
||||
return 0, nil
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
}
|
||||
if i := altLangISO3.Index(s); i != -1 {
|
||||
return langID(altLangIndex[altLangISO3.Elem(i)[3]]), nil
|
||||
}
|
||||
n := strToInt(s)
|
||||
if langNoIndex[n/8]&(1<<(n%8)) != 0 {
|
||||
return langID(n) + langNoIndexOffset, nil
|
||||
}
|
||||
// Check for non-canonical uses of ISO3.
|
||||
for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) {
|
||||
if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] {
|
||||
return langID(i), nil
|
||||
}
|
||||
}
|
||||
return 0, mkErrInvalid(s)
|
||||
}
|
||||
return 0, errSyntax
|
||||
}
|
||||
|
||||
// stringToBuf writes the string to b and returns the number of bytes
|
||||
// written. cap(b) must be >= 3.
|
||||
func (id langID) stringToBuf(b []byte) int {
|
||||
if id >= langNoIndexOffset {
|
||||
intToStr(uint(id)-langNoIndexOffset, b[:3])
|
||||
return 3
|
||||
} else if id == 0 {
|
||||
return copy(b, "und")
|
||||
}
|
||||
l := lang[id<<2:]
|
||||
if l[3] == 0 {
|
||||
return copy(b, l[:3])
|
||||
}
|
||||
return copy(b, l[:2])
|
||||
}
|
||||
|
||||
// String returns the BCP 47 representation of the langID.
|
||||
// Use b as variable name, instead of id, to ensure the variable
|
||||
// used is consistent with that of Base in which this type is embedded.
|
||||
func (b langID) String() string {
|
||||
if b == 0 {
|
||||
return "und"
|
||||
} else if b >= langNoIndexOffset {
|
||||
b -= langNoIndexOffset
|
||||
buf := [3]byte{}
|
||||
intToStr(uint(b), buf[:])
|
||||
return string(buf[:])
|
||||
}
|
||||
l := lang.Elem(int(b))
|
||||
if l[3] == 0 {
|
||||
return l[:3]
|
||||
}
|
||||
return l[:2]
|
||||
}
|
||||
|
||||
// ISO3 returns the ISO 639-3 language code.
|
||||
func (b langID) ISO3() string {
|
||||
if b == 0 || b >= langNoIndexOffset {
|
||||
return b.String()
|
||||
}
|
||||
l := lang.Elem(int(b))
|
||||
if l[3] == 0 {
|
||||
return l[:3]
|
||||
} else if l[2] == 0 {
|
||||
return altLangISO3.Elem(int(l[3]))[:3]
|
||||
}
|
||||
// This allocation will only happen for 3-letter ISO codes
|
||||
// that are non-canonical BCP 47 language identifiers.
|
||||
return l[0:1] + l[2:4]
|
||||
}
|
||||
|
||||
// IsPrivateUse reports whether this language code is reserved for private use.
|
||||
func (b langID) IsPrivateUse() bool {
|
||||
return langPrivateStart <= b && b <= langPrivateEnd
|
||||
}
|
||||
|
||||
type regionID uint16
|
||||
|
||||
// getRegionID returns the region id for s if s is a valid 2- or 3-letter region code
|
||||
// or unknownRegion.
|
||||
func getRegionID(s []byte) (regionID, error) {
|
||||
if len(s) == 3 {
|
||||
if isAlpha(s[0]) {
|
||||
return getRegionISO3(s)
|
||||
}
|
||||
if i, err := strconv.ParseUint(string(s), 10, 10); err == nil {
|
||||
return getRegionM49(int(i))
|
||||
}
|
||||
}
|
||||
return getRegionISO2(s)
|
||||
}
|
||||
|
||||
// getRegionISO2 returns the regionID for the given 2-letter ISO country code
|
||||
// or unknownRegion if this does not exist.
|
||||
func getRegionISO2(s []byte) (regionID, error) {
|
||||
i, err := findIndex(regionISO, s, "ZZ")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return regionID(i) + isoRegionOffset, nil
|
||||
}
|
||||
|
||||
// getRegionISO3 returns the regionID for the given 3-letter ISO country code
|
||||
// or unknownRegion if this does not exist.
|
||||
func getRegionISO3(s []byte) (regionID, error) {
|
||||
if tag.FixCase("ZZZ", s) {
|
||||
for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) {
|
||||
if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] {
|
||||
return regionID(i) + isoRegionOffset, nil
|
||||
}
|
||||
}
|
||||
for i := 0; i < len(altRegionISO3); i += 3 {
|
||||
if tag.Compare(altRegionISO3[i:i+3], s) == 0 {
|
||||
return regionID(altRegionIDs[i/3]), nil
|
||||
}
|
||||
}
|
||||
return 0, mkErrInvalid(s)
|
||||
}
|
||||
return 0, errSyntax
|
||||
}
|
||||
|
||||
func getRegionM49(n int) (regionID, error) {
|
||||
if 0 < n && n <= 999 {
|
||||
const (
|
||||
searchBits = 7
|
||||
regionBits = 9
|
||||
regionMask = 1<<regionBits - 1
|
||||
)
|
||||
idx := n >> searchBits
|
||||
buf := fromM49[m49Index[idx]:m49Index[idx+1]]
|
||||
val := uint16(n) << regionBits // we rely on bits shifting out
|
||||
i := sort.Search(len(buf), func(i int) bool {
|
||||
return buf[i] >= val
|
||||
})
|
||||
if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val {
|
||||
return regionID(r & regionMask), nil
|
||||
}
|
||||
}
|
||||
var e ValueError
|
||||
fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n)
|
||||
return 0, e
|
||||
}
|
||||
|
||||
// normRegion returns a region if r is deprecated or 0 otherwise.
|
||||
// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ).
|
||||
// TODO: consider mapping split up regions to new most populous one (like CLDR).
|
||||
func normRegion(r regionID) regionID {
|
||||
m := regionOldMap
|
||||
k := sort.Search(len(m), func(i int) bool {
|
||||
return m[i].from >= uint16(r)
|
||||
})
|
||||
if k < len(m) && m[k].from == uint16(r) {
|
||||
return regionID(m[k].to)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
const (
|
||||
iso3166UserAssigned = 1 << iota
|
||||
ccTLD
|
||||
bcp47Region
|
||||
)
|
||||
|
||||
func (r regionID) typ() byte {
|
||||
return regionTypes[r]
|
||||
}
|
||||
|
||||
// String returns the BCP 47 representation for the region.
|
||||
// It returns "ZZ" for an unspecified region.
|
||||
func (r regionID) String() string {
|
||||
if r < isoRegionOffset {
|
||||
if r == 0 {
|
||||
return "ZZ"
|
||||
}
|
||||
return fmt.Sprintf("%03d", r.M49())
|
||||
}
|
||||
r -= isoRegionOffset
|
||||
return regionISO.Elem(int(r))[:2]
|
||||
}
|
||||
|
||||
// ISO3 returns the 3-letter ISO code of r.
|
||||
// Note that not all regions have a 3-letter ISO code.
|
||||
// In such cases this method returns "ZZZ".
|
||||
func (r regionID) ISO3() string {
|
||||
if r < isoRegionOffset {
|
||||
return "ZZZ"
|
||||
}
|
||||
r -= isoRegionOffset
|
||||
reg := regionISO.Elem(int(r))
|
||||
switch reg[2] {
|
||||
case 0:
|
||||
return altRegionISO3[reg[3]:][:3]
|
||||
case ' ':
|
||||
return "ZZZ"
|
||||
}
|
||||
return reg[0:1] + reg[2:4]
|
||||
}
|
||||
|
||||
// M49 returns the UN M.49 encoding of r, or 0 if this encoding
|
||||
// is not defined for r.
|
||||
func (r regionID) M49() int {
|
||||
return int(m49[r])
|
||||
}
|
||||
|
||||
// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
|
||||
// may include private-use tags that are assigned by CLDR and used in this
|
||||
// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
|
||||
func (r regionID) IsPrivateUse() bool {
|
||||
return r.typ()&iso3166UserAssigned != 0
|
||||
}
|
||||
|
||||
type scriptID uint8
|
||||
|
||||
// getScriptID returns the script id for string s. It assumes that s
|
||||
// is of the format [A-Z][a-z]{3}.
|
||||
func getScriptID(idx tag.Index, s []byte) (scriptID, error) {
|
||||
i, err := findIndex(idx, s, "Zzzz")
|
||||
return scriptID(i), err
|
||||
}
|
||||
|
||||
// String returns the script code in title case.
|
||||
// It returns "Zzzz" for an unspecified script.
|
||||
func (s scriptID) String() string {
|
||||
if s == 0 {
|
||||
return "Zzzz"
|
||||
}
|
||||
return script.Elem(int(s))
|
||||
}
|
||||
|
||||
// IsPrivateUse reports whether this script code is reserved for private use.
|
||||
func (s scriptID) IsPrivateUse() bool {
|
||||
return _Qaaa <= s && s <= _Qabx
|
||||
}
|
||||
|
||||
const (
|
||||
maxAltTaglen = len("en-US-POSIX")
|
||||
maxLen = maxAltTaglen
|
||||
)
|
||||
|
||||
var (
|
||||
// grandfatheredMap holds a mapping from legacy and grandfathered tags to
|
||||
// their base language or an index to a more elaborate tag.
|
||||
grandfatheredMap = map[[maxLen]byte]int16{
|
||||
[maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban
|
||||
[maxLen]byte{'i', '-', 'a', 'm', 'i'}: _ami, // i-ami
|
||||
[maxLen]byte{'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn
|
||||
[maxLen]byte{'i', '-', 'h', 'a', 'k'}: _hak, // i-hak
|
||||
[maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon
|
||||
[maxLen]byte{'i', '-', 'l', 'u', 'x'}: _lb, // i-lux
|
||||
[maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo
|
||||
[maxLen]byte{'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn
|
||||
[maxLen]byte{'i', '-', 't', 'a', 'o'}: _tao, // i-tao
|
||||
[maxLen]byte{'i', '-', 't', 'a', 'y'}: _tay, // i-tay
|
||||
[maxLen]byte{'i', '-', 't', 's', 'u'}: _tsu, // i-tsu
|
||||
[maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok
|
||||
[maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn
|
||||
[maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR
|
||||
[maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL
|
||||
[maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE
|
||||
[maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu
|
||||
[maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka
|
||||
[maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan
|
||||
[maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang
|
||||
|
||||
// Grandfathered tags with no modern replacement will be converted as
|
||||
// follows:
|
||||
[maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish
|
||||
[maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed
|
||||
[maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default
|
||||
[maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian
|
||||
[maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo
|
||||
[maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min
|
||||
|
||||
// CLDR-specific tag.
|
||||
[maxLen]byte{'r', 'o', 'o', 't'}: 0, // root
|
||||
[maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX
|
||||
}
|
||||
|
||||
altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102}
|
||||
|
||||
altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix"
|
||||
)
|
||||
|
||||
func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) {
|
||||
if v, ok := grandfatheredMap[s]; ok {
|
||||
if v < 0 {
|
||||
return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true
|
||||
}
|
||||
t.lang = langID(v)
|
||||
return t, true
|
||||
}
|
||||
return t, false
|
||||
}
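// Illustrative sketch, not part of the original file: grandfathered tags are
// resolved through the table above when parsing via the public API.
//
//	language.Make("i-klingon") // resolves to "tlh"
//	language.Make("zh-hakka")  // resolves to "hak"
//	language.Make("en-GB-oed") // resolves to the en-GB-oxendict replacement form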
|
933
vendor/golang.org/x/text/language/match.go
generated
vendored
933
vendor/golang.org/x/text/language/match.go
generated
vendored
@ -1,933 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import "errors"
|
||||
|
||||
// A MatchOption configures a Matcher.
|
||||
type MatchOption func(*matcher)
|
||||
|
||||
// PreferSameScript will, in the absence of a match, cause the first preferred
|
||||
// tag that has the same script as a supported tag to match that supported tag.
|
||||
// The default is currently true, but this may change in the future.
|
||||
func PreferSameScript(preferSame bool) MatchOption {
|
||||
return func(m *matcher) { m.preferSameScript = preferSame }
|
||||
}
|
||||
|
||||
// TODO(v1.0.0): consider making Matcher a concrete type, instead of interface.
|
||||
// There doesn't seem to be too much need for multiple types.
|
||||
// Making it a concrete type allows MatchStrings to be a method, which will
|
||||
// improve its discoverability.
|
||||
|
||||
// MatchStrings parses and matches the given strings until one of them matches
|
||||
// the language in the Matcher. A string may be an Accept-Language header as
|
||||
// handled by ParseAcceptLanguage. The default language is returned if no
|
||||
// other language matched.
|
||||
func MatchStrings(m Matcher, lang ...string) (tag Tag, index int) {
|
||||
for _, accept := range lang {
|
||||
desired, _, err := ParseAcceptLanguage(accept)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if tag, index, conf := m.Match(desired...); conf != No {
|
||||
return tag, index
|
||||
}
|
||||
}
|
||||
tag, index, _ = m.Match()
|
||||
return
|
||||
}
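// Illustrative sketch, not part of the original file: a typical matcher set-up
// as used from client code; the supported tags are assumed example values.
//
//	matcher := language.NewMatcher([]language.Tag{
//		language.English, // the first element is the default
//		language.German,
//		language.Japanese,
//	})
//	tag, index := language.MatchStrings(matcher, "de-CH,fr;q=0.9", "en")
//	// tag is the best supported match, index its position in the supported list.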
|
||||
|
||||
// Matcher is the interface that wraps the Match method.
|
||||
//
|
||||
// Match returns the best match for any of the given tags, along with
|
||||
// a unique index associated with the returned tag and a confidence
|
||||
// score.
|
||||
type Matcher interface {
|
||||
Match(t ...Tag) (tag Tag, index int, c Confidence)
|
||||
}
|
||||
|
||||
// Comprehends reports the confidence score for a speaker of a given language
|
||||
// to be able to comprehend the written form of an alternative language.
|
||||
func Comprehends(speaker, alternative Tag) Confidence {
|
||||
_, _, c := NewMatcher([]Tag{alternative}).Match(speaker)
|
||||
return c
|
||||
}
|
||||
|
||||
// NewMatcher returns a Matcher that matches an ordered list of preferred tags
|
||||
// against a list of supported tags based on written intelligibility, closeness
|
||||
// of dialect, equivalence of subtags and various other rules. It is initialized
|
||||
// with the list of supported tags. The first element is used as the default
|
||||
// value in case no match is found.
|
||||
//
|
||||
// Its Match method matches the first of the given Tags to reach a certain
|
||||
// confidence threshold. The tags passed to Match should therefore be specified
|
||||
// in order of preference. Extensions are ignored for matching.
|
||||
//
|
||||
// The index returned by the Match method corresponds to the index of the
|
||||
// matched tag in t, but is augmented with the Unicode extension ('u') of the
|
||||
// corresponding preferred tag. This allows user locale options to be passed
|
||||
// transparently.
|
||||
func NewMatcher(t []Tag, options ...MatchOption) Matcher {
|
||||
return newMatcher(t, options)
|
||||
}
|
||||
|
||||
func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
|
||||
match, w, c := m.getBest(want...)
|
||||
if match != nil {
|
||||
t, index = match.tag, match.index
|
||||
} else {
|
||||
// TODO: this should be an option
|
||||
t = m.default_.tag
|
||||
if m.preferSameScript {
|
||||
outer:
|
||||
for _, w := range want {
|
||||
script, _ := w.Script()
|
||||
if script.scriptID == 0 {
|
||||
// Don't do anything if there is no script, such as with
|
||||
// private subtags.
|
||||
continue
|
||||
}
|
||||
for i, h := range m.supported {
|
||||
if script.scriptID == h.maxScript {
|
||||
t, index = h.tag, i
|
||||
break outer
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// TODO: select first language tag based on script.
|
||||
}
|
||||
if w.region != 0 && t.region != 0 && t.region.contains(w.region) {
|
||||
t, _ = Raw.Compose(t, Region{w.region})
|
||||
}
|
||||
// Copy options from the user-provided tag into the result tag. This is hard
|
||||
// to do after the fact, so we do it here.
|
||||
// TODO: add in alternative variants to -u-va-.
|
||||
// TODO: add preferred region to -u-rg-.
|
||||
if e := w.Extensions(); len(e) > 0 {
|
||||
t, _ = Raw.Compose(t, e)
|
||||
}
|
||||
return t, index, c
|
||||
}
|
||||
|
||||
type scriptRegionFlags uint8
|
||||
|
||||
const (
|
||||
isList = 1 << iota
|
||||
scriptInFrom
|
||||
regionInFrom
|
||||
)
|
||||
|
||||
func (t *Tag) setUndefinedLang(id langID) {
|
||||
if t.lang == 0 {
|
||||
t.lang = id
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tag) setUndefinedScript(id scriptID) {
|
||||
if t.script == 0 {
|
||||
t.script = id
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tag) setUndefinedRegion(id regionID) {
|
||||
if t.region == 0 || t.region.contains(id) {
|
||||
t.region = id
|
||||
}
|
||||
}
|
||||
|
||||
// ErrMissingLikelyTagsData indicates no information was available
|
||||
// to compute likely values of missing tags.
|
||||
var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
|
||||
|
||||
// addLikelySubtags sets subtags to their most likely value, given the locale.
|
||||
// In most cases this means setting fields for unknown values, but in some
|
||||
// cases it may alter a value. It returns an ErrMissingLikelyTagsData error
|
||||
// if the given locale cannot be expanded.
|
||||
func (t Tag) addLikelySubtags() (Tag, error) {
|
||||
id, err := addTags(t)
|
||||
if err != nil {
|
||||
return t, err
|
||||
} else if id.equalTags(t) {
|
||||
return t, nil
|
||||
}
|
||||
id.remakeString()
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// specializeRegion attempts to specialize a group region.
|
||||
func specializeRegion(t *Tag) bool {
|
||||
if i := regionInclusion[t.region]; i < nRegionGroups {
|
||||
x := likelyRegionGroup[i]
|
||||
if langID(x.lang) == t.lang && scriptID(x.script) == t.script {
|
||||
t.region = regionID(x.region)
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func addTags(t Tag) (Tag, error) {
|
||||
// We leave private use identifiers alone.
|
||||
if t.private() {
|
||||
return t, nil
|
||||
}
|
||||
if t.script != 0 && t.region != 0 {
|
||||
if t.lang != 0 {
|
||||
// already fully specified
|
||||
specializeRegion(&t)
|
||||
return t, nil
|
||||
}
|
||||
// Search matches for und-script-region. Note that for these cases
|
||||
// region will never be a group so there is no need to check for this.
|
||||
list := likelyRegion[t.region : t.region+1]
|
||||
if x := list[0]; x.flags&isList != 0 {
|
||||
list = likelyRegionList[x.lang : x.lang+uint16(x.script)]
|
||||
}
|
||||
for _, x := range list {
|
||||
// Deviating from the spec. See match_test.go for details.
|
||||
if scriptID(x.script) == t.script {
|
||||
t.setUndefinedLang(langID(x.lang))
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if t.lang != 0 {
|
||||
// Search matches for lang-script and lang-region, where lang != und.
|
||||
if t.lang < langNoIndexOffset {
|
||||
x := likelyLang[t.lang]
|
||||
if x.flags&isList != 0 {
|
||||
list := likelyLangList[x.region : x.region+uint16(x.script)]
|
||||
if t.script != 0 {
|
||||
for _, x := range list {
|
||||
if scriptID(x.script) == t.script && x.flags&scriptInFrom != 0 {
|
||||
t.setUndefinedRegion(regionID(x.region))
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
} else if t.region != 0 {
|
||||
count := 0
|
||||
goodScript := true
|
||||
tt := t
|
||||
for _, x := range list {
|
||||
// We visit all entries for which the script was not
|
||||
// defined, including the ones where the region was not
|
||||
// defined. This allows for proper disambiguation within
|
||||
// regions.
|
||||
if x.flags&scriptInFrom == 0 && t.region.contains(regionID(x.region)) {
|
||||
tt.region = regionID(x.region)
|
||||
tt.setUndefinedScript(scriptID(x.script))
|
||||
goodScript = goodScript && tt.script == scriptID(x.script)
|
||||
count++
|
||||
}
|
||||
}
|
||||
if count == 1 {
|
||||
return tt, nil
|
||||
}
|
||||
// Even if we fail to find a unique Region, we might have
|
||||
// an unambiguous script.
|
||||
if goodScript {
|
||||
t.script = tt.script
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Search matches for und-script.
|
||||
if t.script != 0 {
|
||||
x := likelyScript[t.script]
|
||||
if x.region != 0 {
|
||||
t.setUndefinedRegion(regionID(x.region))
|
||||
t.setUndefinedLang(langID(x.lang))
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
// Search matches for und-region. If und-script-region exists, it would
|
||||
// have been found earlier.
|
||||
if t.region != 0 {
|
||||
if i := regionInclusion[t.region]; i < nRegionGroups {
|
||||
x := likelyRegionGroup[i]
|
||||
if x.region != 0 {
|
||||
t.setUndefinedLang(langID(x.lang))
|
||||
t.setUndefinedScript(scriptID(x.script))
|
||||
t.region = regionID(x.region)
|
||||
}
|
||||
} else {
|
||||
x := likelyRegion[t.region]
|
||||
if x.flags&isList != 0 {
|
||||
x = likelyRegionList[x.lang]
|
||||
}
|
||||
if x.script != 0 && x.flags != scriptInFrom {
|
||||
t.setUndefinedLang(langID(x.lang))
|
||||
t.setUndefinedScript(scriptID(x.script))
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Search matches for lang.
|
||||
if t.lang < langNoIndexOffset {
|
||||
x := likelyLang[t.lang]
|
||||
if x.flags&isList != 0 {
|
||||
x = likelyLangList[x.region]
|
||||
}
|
||||
if x.region != 0 {
|
||||
t.setUndefinedScript(scriptID(x.script))
|
||||
t.setUndefinedRegion(regionID(x.region))
|
||||
}
|
||||
specializeRegion(&t)
|
||||
if t.lang == 0 {
|
||||
t.lang = _en // default language
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
return t, ErrMissingLikelyTagsData
|
||||
}
|
||||
|
||||
func (t *Tag) setTagsFrom(id Tag) {
|
||||
t.lang = id.lang
|
||||
t.script = id.script
|
||||
t.region = id.region
|
||||
}
|
||||
|
||||
// minimize removes the region or script subtags from t such that
|
||||
// t.addLikelySubtags() == t.minimize().addLikelySubtags().
|
||||
func (t Tag) minimize() (Tag, error) {
|
||||
t, err := minimizeTags(t)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
t.remakeString()
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// minimizeTags mimics the behavior of the ICU 51 C implementation.
|
||||
func minimizeTags(t Tag) (Tag, error) {
|
||||
if t.equalTags(und) {
|
||||
return t, nil
|
||||
}
|
||||
max, err := addTags(t)
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
for _, id := range [...]Tag{
|
||||
{lang: t.lang},
|
||||
{lang: t.lang, region: t.region},
|
||||
{lang: t.lang, script: t.script},
|
||||
} {
|
||||
if x, err := addTags(id); err == nil && max.equalTags(x) {
|
||||
t.setTagsFrom(id)
|
||||
break
|
||||
}
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// Tag Matching
|
||||
// CLDR defines an algorithm for finding the best match between two sets of language
|
||||
// tags. The basic algorithm defines how to score a possible match and then find
|
||||
// the match with the best score
|
||||
// (see http://www.unicode.org/reports/tr35/#LanguageMatching).
|
||||
// Using scoring has several disadvantages. The scoring obfuscates the importance of
|
||||
// the various factors considered, making the algorithm harder to understand. Using
|
||||
// scoring also requires the full score to be computed for each pair of tags.
|
||||
//
|
||||
// We will use a different algorithm which aims to have the following properties:
|
||||
// - clarity on the precedence of the various selection factors, and
|
||||
// - improved performance by allowing early termination of a comparison.
|
||||
//
|
||||
// Matching algorithm (overview)
|
||||
// Input:
|
||||
// - supported: a set of supported tags
|
||||
// - default: the default tag to return in case there is no match
|
||||
// - desired: list of desired tags, ordered by preference, starting with
|
||||
// the most-preferred.
|
||||
//
|
||||
// Algorithm:
|
||||
// 1) Set the best match to the lowest confidence level
|
||||
// 2) For each tag in "desired":
|
||||
// a) For each tag in "supported":
|
||||
// 1) compute the match between the two tags.
|
||||
// 2) if the match is better than the previous best match, replace it
|
||||
// with the new match. (see next section)
|
||||
// b) if the current best match is Exact and pin is true the result will be
|
||||
// frozen to the language found thus far, although better matches may
|
||||
// still be found for the same language.
|
||||
// 3) If the best match so far is below a certain threshold, return "default".
|
||||
//
|
||||
// Ranking:
|
||||
// We use two phases to determine whether one pair of tags are a better match
|
||||
// than another pair of tags. First, we determine a rough confidence level. If the
|
||||
// levels are different, the one with the highest confidence wins.
|
||||
// Second, if the rough confidence levels are identical, we use a set of tie-breaker
|
||||
// rules.
|
||||
//
|
||||
// The confidence level of matching a pair of tags is determined by finding the
|
||||
// lowest confidence level of any matches of the corresponding subtags (the
|
||||
// result is deemed as good as its weakest link).
|
||||
// We define the following levels:
|
||||
// Exact - An exact match of a subtag, before adding likely subtags.
|
||||
// MaxExact - An exact match of a subtag, after adding likely subtags.
|
||||
// [See Note 2].
|
||||
// High - High level of mutual intelligibility between different subtag
|
||||
// variants.
|
||||
// Low - Low level of mutual intelligibility between different subtag
|
||||
// variants.
|
||||
// No - No mutual intelligibility.
|
||||
//
|
||||
// The following levels can occur for each type of subtag:
|
||||
// Base: Exact, MaxExact, High, Low, No
|
||||
// Script: Exact, MaxExact [see Note 3], Low, No
|
||||
// Region: Exact, MaxExact, High
|
||||
// Variant: Exact, High
|
||||
// Private: Exact, No
|
||||
//
|
||||
// Any result with a confidence level of Low or higher is deemed a possible match.
|
||||
// Once a desired tag matches any of the supported tags with a level of MaxExact
|
||||
// or higher, the next desired tag is not considered (see Step 2.b).
|
||||
// Note that CLDR provides languageMatching data that defines close equivalence
|
||||
// classes for base languages, scripts and regions.
|
||||
//
|
||||
// Tie-breaking
|
||||
// If we get the same confidence level for two matches, we apply a sequence of
|
||||
// tie-breaking rules. The first that succeeds defines the result. The rules are
|
||||
// applied in the following order.
|
||||
// 1) Original language was defined and was identical.
|
||||
// 2) Original region was defined and was identical.
|
||||
// 3) Distance between two maximized regions was the smallest.
|
||||
// 4) Original script was defined and was identical.
|
||||
// 5) Distance from want tag to have tag using the parent relation [see Note 5.]
|
||||
// If there is still no winner after these rules are applied, the first match
|
||||
// found wins.
|
||||
//
|
||||
// Notes:
|
||||
// [2] In practice, as matching of Exact is done in a separate phase from
|
||||
// matching the other levels, we reuse the Exact level to mean MaxExact in
|
||||
// the second phase. As a consequence, we only need the levels defined by
|
||||
// the Confidence type. The MaxExact confidence level is mapped to High in
|
||||
// the public API.
|
||||
// [3] We do not differentiate between maximized script values that were derived
|
||||
// from suppressScript versus most likely tag data. We determined that in
|
||||
// ranking the two, one ranks just after the other. Moreover, the two cannot
|
||||
// occur concurrently. As a consequence, they are identical for practical
|
||||
// purposes.
|
||||
// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign
|
||||
// the MaxExact level to allow iw vs he to still be a closer match than
|
||||
// en-AU vs en-US, for example.
|
||||
// [5] In CLDR a locale inherits fields that are unspecified for this locale
|
||||
// from its parent. Therefore, if a locale is a parent of another locale,
|
||||
// it is a strong measure for closeness, especially when no other tie
|
||||
// breaker rule applies. One could also argue it is inconsistent, for
|
||||
// example, when pt-AO matches pt (which CLDR equates with pt-BR), even
|
||||
// though its parent is pt-PT according to the inheritance rules.
|
||||
//
|
||||
// Implementation Details:
|
||||
// There are several performance considerations worth pointing out. Most notably,
|
||||
// we preprocess as much as possible (within reason) at the time of creation of a
|
||||
// matcher. This includes:
|
||||
// - creating a per-language map, which includes data for the raw base language
|
||||
// and its canonicalized variant (if applicable),
|
||||
// - expanding entries for the equivalence classes defined in CLDR's
|
||||
// languageMatch data.
|
||||
// The per-language map ensures that typically only a very small number of tags
|
||||
// need to be considered. The pre-expansion of canonicalized subtags and
|
||||
// equivalence classes reduces the amount of map lookups that need to be done at
|
||||
// runtime.
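// Illustrative sketch, not part of the original file: the confidence levels
// described above surface through Matcher.Match; the tags are assumed examples.
//
//	m := language.NewMatcher([]language.Tag{language.BritishEnglish, language.Serbian})
//	_, _, conf := m.Match(language.MustParse("en-US"))
//	// conf is High here: the base language matches but the region differs.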
|
||||
|
||||
// matcher keeps a set of supported language tags, indexed by language.
|
||||
type matcher struct {
|
||||
default_ *haveTag
|
||||
supported []*haveTag
|
||||
index map[langID]*matchHeader
|
||||
passSettings bool
|
||||
preferSameScript bool
|
||||
}
|
||||
|
||||
// matchHeader has the lists of tags for exact matches and matches based on
|
||||
// maximized and canonicalized tags for a given language.
|
||||
type matchHeader struct {
|
||||
haveTags []*haveTag
|
||||
original bool
|
||||
}
|
||||
|
||||
// haveTag holds a supported Tag and its maximized script and region. The maximized
|
||||
// or canonicalized language is not stored as it is not needed during matching.
|
||||
type haveTag struct {
|
||||
tag Tag
|
||||
|
||||
// index of this tag in the original list of supported tags.
|
||||
index int
|
||||
|
||||
// conf is the maximum confidence that can result from matching this haveTag.
|
||||
// When conf < Exact this means it was inserted after applying a CLDR equivalence rule.
|
||||
conf Confidence
|
||||
|
||||
// Maximized region and script.
|
||||
maxRegion regionID
|
||||
maxScript scriptID
|
||||
|
||||
// altScript may be checked as an alternative match to maxScript. If altScript
|
||||
// matches, the confidence level for this match is Low. Theoretically there
|
||||
// could be multiple alternative scripts. This does not occur in practice.
|
||||
altScript scriptID
|
||||
|
||||
// nextMax is the index of the next haveTag with the same maximized tags.
|
||||
nextMax uint16
|
||||
}
|
||||
|
||||
func makeHaveTag(tag Tag, index int) (haveTag, langID) {
|
||||
max := tag
|
||||
if tag.lang != 0 || tag.region != 0 || tag.script != 0 {
|
||||
max, _ = max.canonicalize(All)
|
||||
max, _ = addTags(max)
|
||||
max.remakeString()
|
||||
}
|
||||
return haveTag{tag, index, Exact, max.region, max.script, altScript(max.lang, max.script), 0}, max.lang
|
||||
}
|
||||
|
||||
// altScript returns an alternative script that may match the given script with
|
||||
// a low confidence. At the moment, the langMatch data allows for at most one
|
||||
// script to map to another and we rely on this to keep the code simple.
|
||||
func altScript(l langID, s scriptID) scriptID {
|
||||
for _, alt := range matchScript {
|
||||
// TODO: also match cases where language is not the same.
|
||||
if (langID(alt.wantLang) == l || langID(alt.haveLang) == l) &&
|
||||
scriptID(alt.haveScript) == s {
|
||||
return scriptID(alt.wantScript)
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// addIfNew adds a haveTag to the list of tags only if it is a unique tag.
|
||||
// Tags that have the same maximized values are linked by index.
|
||||
func (h *matchHeader) addIfNew(n haveTag, exact bool) {
|
||||
h.original = h.original || exact
|
||||
// Don't add new exact matches.
|
||||
for _, v := range h.haveTags {
|
||||
if v.tag.equalsRest(n.tag) {
|
||||
return
|
||||
}
|
||||
}
|
||||
// Allow duplicate maximized tags, but create a linked list to allow quickly
|
||||
// comparing the equivalents and bailing out.
|
||||
for i, v := range h.haveTags {
|
||||
if v.maxScript == n.maxScript &&
|
||||
v.maxRegion == n.maxRegion &&
|
||||
v.tag.variantOrPrivateTagStr() == n.tag.variantOrPrivateTagStr() {
|
||||
for h.haveTags[i].nextMax != 0 {
|
||||
i = int(h.haveTags[i].nextMax)
|
||||
}
|
||||
h.haveTags[i].nextMax = uint16(len(h.haveTags))
|
||||
break
|
||||
}
|
||||
}
|
||||
h.haveTags = append(h.haveTags, &n)
|
||||
}
|
||||
|
||||
// header returns the matchHeader for the given language. It creates one if
|
||||
// it doesn't already exist.
|
||||
func (m *matcher) header(l langID) *matchHeader {
|
||||
if h := m.index[l]; h != nil {
|
||||
return h
|
||||
}
|
||||
h := &matchHeader{}
|
||||
m.index[l] = h
|
||||
return h
|
||||
}
|
||||
|
||||
func toConf(d uint8) Confidence {
|
||||
if d <= 10 {
|
||||
return High
|
||||
}
|
||||
if d < 30 {
|
||||
return Low
|
||||
}
|
||||
return No
|
||||
}
|
||||
|
||||
// newMatcher builds an index for the given supported tags and returns it as
|
||||
// a matcher. It also expands the index by considering various equivalence classes
|
||||
// for a given tag.
|
||||
func newMatcher(supported []Tag, options []MatchOption) *matcher {
|
||||
m := &matcher{
|
||||
index: make(map[langID]*matchHeader),
|
||||
preferSameScript: true,
|
||||
}
|
||||
for _, o := range options {
|
||||
o(m)
|
||||
}
|
||||
if len(supported) == 0 {
|
||||
m.default_ = &haveTag{}
|
||||
return m
|
||||
}
|
||||
// Add supported languages to the index. Add exact matches first to give
|
||||
// them precedence.
|
||||
for i, tag := range supported {
|
||||
pair, _ := makeHaveTag(tag, i)
|
||||
m.header(tag.lang).addIfNew(pair, true)
|
||||
m.supported = append(m.supported, &pair)
|
||||
}
|
||||
m.default_ = m.header(supported[0].lang).haveTags[0]
|
||||
// Keep these in two different loops to support the case that two equivalent
|
||||
// languages are distinguished, such as iw and he.
|
||||
for i, tag := range supported {
|
||||
pair, max := makeHaveTag(tag, i)
|
||||
if max != tag.lang {
|
||||
m.header(max).addIfNew(pair, true)
|
||||
}
|
||||
}
|
||||
|
||||
// update is used to add indexes in the map for equivalent languages.
|
||||
// update will only add entries to original indexes, thus not computing any
|
||||
// transitive relations.
|
||||
update := func(want, have uint16, conf Confidence) {
|
||||
if hh := m.index[langID(have)]; hh != nil {
|
||||
if !hh.original {
|
||||
return
|
||||
}
|
||||
hw := m.header(langID(want))
|
||||
for _, ht := range hh.haveTags {
|
||||
v := *ht
|
||||
if conf < v.conf {
|
||||
v.conf = conf
|
||||
}
|
||||
v.nextMax = 0 // this value needs to be recomputed
|
||||
if v.altScript != 0 {
|
||||
v.altScript = altScript(langID(want), v.maxScript)
|
||||
}
|
||||
hw.addIfNew(v, conf == Exact && hh.original)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add entries for languages with mutual intelligibility as defined by CLDR's
|
||||
// languageMatch data.
|
||||
for _, ml := range matchLang {
|
||||
update(ml.want, ml.have, toConf(ml.distance))
|
||||
if !ml.oneway {
|
||||
update(ml.have, ml.want, toConf(ml.distance))
|
||||
}
|
||||
}
|
||||
|
||||
// Add entries for possible canonicalizations. This is an optimization to
|
||||
// ensure that only one map lookup needs to be done at runtime per desired tag.
|
||||
// First we match deprecated equivalents. If they are perfect equivalents
|
||||
// (their canonicalization simply substitutes a different language code, but
|
||||
// nothing else), the match confidence is Exact, otherwise it is High.
|
||||
for i, lm := range langAliasMap {
|
||||
// If deprecated codes match and there is no fiddling with the script or
|
||||
// region, we consider it an exact match.
|
||||
conf := Exact
|
||||
if langAliasTypes[i] != langMacro {
|
||||
if !isExactEquivalent(langID(lm.from)) {
|
||||
conf = High
|
||||
}
|
||||
update(lm.to, lm.from, conf)
|
||||
}
|
||||
update(lm.from, lm.to, conf)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// getBest gets the best matching tag in m for any of the given tags, taking into
|
||||
// account the order of preference of the given tags.
|
||||
func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
|
||||
best := bestMatch{}
|
||||
for i, w := range want {
|
||||
var max Tag
|
||||
// Check for exact match first.
|
||||
h := m.index[w.lang]
|
||||
if w.lang != 0 {
|
||||
if h == nil {
|
||||
continue
|
||||
}
|
||||
// Base language is defined.
|
||||
max, _ = w.canonicalize(Legacy | Deprecated | Macro)
|
||||
// A region that is added through canonicalization is stronger than
|
||||
// a maximized region: set it in the original (e.g. mo -> ro-MD).
|
||||
if w.region != max.region {
|
||||
w.region = max.region
|
||||
}
|
||||
// TODO: should we do the same for scripts?
|
||||
// See test case: en, sr, nl ; sh ; sr
|
||||
max, _ = addTags(max)
|
||||
} else {
|
||||
// Base language is not defined.
|
||||
if h != nil {
|
||||
for i := range h.haveTags {
|
||||
have := h.haveTags[i]
|
||||
if have.tag.equalsRest(w) {
|
||||
return have, w, Exact
|
||||
}
|
||||
}
|
||||
}
|
||||
if w.script == 0 && w.region == 0 {
|
||||
// We skip all tags matching und for approximate matching, including
|
||||
// private tags.
|
||||
continue
|
||||
}
|
||||
max, _ = addTags(w)
|
||||
if h = m.index[max.lang]; h == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
pin := true
|
||||
for _, t := range want[i+1:] {
|
||||
if w.lang == t.lang {
|
||||
pin = false
|
||||
break
|
||||
}
|
||||
}
|
||||
// Check for match based on maximized tag.
|
||||
for i := range h.haveTags {
|
||||
have := h.haveTags[i]
|
||||
best.update(have, w, max.script, max.region, pin)
|
||||
if best.conf == Exact {
|
||||
for have.nextMax != 0 {
|
||||
have = h.haveTags[have.nextMax]
|
||||
best.update(have, w, max.script, max.region, pin)
|
||||
}
|
||||
return best.have, best.want, best.conf
|
||||
}
|
||||
}
|
||||
}
|
||||
if best.conf <= No {
|
||||
if len(want) != 0 {
|
||||
return nil, want[0], No
|
||||
}
|
||||
return nil, Tag{}, No
|
||||
}
|
||||
return best.have, best.want, best.conf
|
||||
}
|
||||
|
||||
// bestMatch accumulates the best match so far.
|
||||
type bestMatch struct {
|
||||
have *haveTag
|
||||
want Tag
|
||||
conf Confidence
|
||||
pinnedRegion regionID
|
||||
pinLanguage bool
|
||||
sameRegionGroup bool
|
||||
// Cached results from applying tie-breaking rules.
|
||||
origLang bool
|
||||
origReg bool
|
||||
paradigmReg bool
|
||||
regGroupDist uint8
|
||||
origScript bool
|
||||
}
|
||||
|
||||
// update updates the existing best match if the new pair is considered to be a
|
||||
// better match. To determine if the given pair is a better match, it first
|
||||
// computes the rough confidence level. If this surpasses the current match, it
|
||||
// will replace it and update the tie-breaker rule cache. If there is a tie, it
|
||||
// proceeds with applying a series of tie-breaker rules. If there is no
|
||||
// conclusive winner after applying the tie-breaker rules, it leaves the current
|
||||
// match as the preferred match.
|
||||
//
|
||||
// If pin is true and have and tag are a strong match, it will henceforth only
|
||||
// consider matches for this language. This corresponds to the notion that most
|
||||
// users have a strong preference for the first defined language. A user can
|
||||
// still prefer a second language over a dialect of the preferred language by
|
||||
// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should
|
||||
// be false.
|
||||
func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID, pin bool) {
|
||||
// Bail if the maximum attainable confidence is below that of the current best match.
|
||||
c := have.conf
|
||||
if c < m.conf {
|
||||
return
|
||||
}
|
||||
// Don't change the language once we already have found an exact match.
|
||||
if m.pinLanguage && tag.lang != m.want.lang {
|
||||
return
|
||||
}
|
||||
// Pin the region group if we are comparing tags for the same language.
|
||||
if tag.lang == m.want.lang && m.sameRegionGroup {
|
||||
_, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.lang)
|
||||
if !sameGroup {
|
||||
return
|
||||
}
|
||||
}
|
||||
if c == Exact && have.maxScript == maxScript {
|
||||
// If there is another language and then another entry of this language,
|
||||
// don't pin anything, otherwise pin the language.
|
||||
m.pinLanguage = pin
|
||||
}
|
||||
if have.tag.equalsRest(tag) {
|
||||
} else if have.maxScript != maxScript {
|
||||
// There is usually very little comprehension between different scripts.
|
||||
// In a few cases there may still be Low comprehension. This possibility
|
||||
// is pre-computed and stored in have.altScript.
|
||||
if Low < m.conf || have.altScript != maxScript {
|
||||
return
|
||||
}
|
||||
c = Low
|
||||
} else if have.maxRegion != maxRegion {
|
||||
if High < c {
|
||||
// There is usually a small difference between languages across regions.
|
||||
c = High
|
||||
}
|
||||
}
|
||||
|
||||
// We store the results of the computations of the tie-breaker rules along
|
||||
// with the best match. There is no need to do the checks once we determine
|
||||
// we have a winner, but we do still need to do the tie-breaker computations.
|
||||
// We use "beaten" to keep track if we still need to do the checks.
|
||||
beaten := false // true if the new pair defeats the current one.
|
||||
if c != m.conf {
|
||||
if c < m.conf {
|
||||
return
|
||||
}
|
||||
beaten = true
|
||||
}
|
||||
|
||||
// Tie-breaker rules:
|
||||
// We prefer if the pre-maximized language was specified and identical.
|
||||
origLang := have.tag.lang == tag.lang && tag.lang != 0
|
||||
if !beaten && m.origLang != origLang {
|
||||
if m.origLang {
|
||||
return
|
||||
}
|
||||
beaten = true
|
||||
}
|
||||
|
||||
// We prefer if the pre-maximized region was specified and identical.
|
||||
origReg := have.tag.region == tag.region && tag.region != 0
|
||||
if !beaten && m.origReg != origReg {
|
||||
if m.origReg {
|
||||
return
|
||||
}
|
||||
beaten = true
|
||||
}
|
||||
|
||||
regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.lang)
|
||||
if !beaten && m.regGroupDist != regGroupDist {
|
||||
if regGroupDist > m.regGroupDist {
|
||||
return
|
||||
}
|
||||
beaten = true
|
||||
}
|
||||
|
||||
paradigmReg := isParadigmLocale(tag.lang, have.maxRegion)
|
||||
if !beaten && m.paradigmReg != paradigmReg {
|
||||
if !paradigmReg {
|
||||
return
|
||||
}
|
||||
beaten = true
|
||||
}
|
||||
|
||||
// Next we prefer if the pre-maximized script was specified and identical.
|
||||
origScript := have.tag.script == tag.script && tag.script != 0
|
||||
if !beaten && m.origScript != origScript {
|
||||
if m.origScript {
|
||||
return
|
||||
}
|
||||
beaten = true
|
||||
}
|
||||
|
||||
// Update m to the newly found best match.
|
||||
if beaten {
|
||||
m.have = have
|
||||
m.want = tag
|
||||
m.conf = c
|
||||
m.pinnedRegion = maxRegion
|
||||
m.sameRegionGroup = sameGroup
|
||||
m.origLang = origLang
|
||||
m.origReg = origReg
|
||||
m.paradigmReg = paradigmReg
|
||||
m.origScript = origScript
|
||||
m.regGroupDist = regGroupDist
|
||||
}
|
||||
}
|
||||
|
||||
func isParadigmLocale(lang langID, r regionID) bool {
|
||||
for _, e := range paradigmLocales {
|
||||
if langID(e[0]) == lang && (r == regionID(e[1]) || r == regionID(e[2])) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// regionGroupDist computes the distance between two regions based on their
|
||||
// CLDR grouping.
|
||||
func regionGroupDist(a, b regionID, script scriptID, lang langID) (dist uint8, same bool) {
|
||||
const defaultDistance = 4
|
||||
|
||||
aGroup := uint(regionToGroups[a]) << 1
|
||||
bGroup := uint(regionToGroups[b]) << 1
|
||||
for _, ri := range matchRegion {
|
||||
if langID(ri.lang) == lang && (ri.script == 0 || scriptID(ri.script) == script) {
|
||||
group := uint(1 << (ri.group &^ 0x80))
|
||||
if 0x80&ri.group == 0 {
|
||||
if aGroup&bGroup&group != 0 { // Both regions are in the group.
|
||||
return ri.distance, ri.distance == defaultDistance
|
||||
}
|
||||
} else {
|
||||
if (aGroup|bGroup)&group == 0 { // Both regions are not in the group.
|
||||
return ri.distance, ri.distance == defaultDistance
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return defaultDistance, true
|
||||
}
|
||||
|
||||
func (t Tag) variants() string {
|
||||
if t.pVariant == 0 {
|
||||
return ""
|
||||
}
|
||||
return t.str[t.pVariant:t.pExt]
|
||||
}
|
||||
|
||||
// variantOrPrivateTagStr returns variants or private use tags.
|
||||
func (t Tag) variantOrPrivateTagStr() string {
|
||||
if t.pExt > 0 {
|
||||
return t.str[t.pVariant:t.pExt]
|
||||
}
|
||||
return t.str[t.pVariant:]
|
||||
}
|
||||
|
||||
// equalsRest compares everything except the language.
|
||||
func (a Tag) equalsRest(b Tag) bool {
|
||||
// TODO: don't include extensions in this comparison. To do this efficiently,
|
||||
// though, we should handle private tags separately.
|
||||
return a.script == b.script && a.region == b.region && a.variantOrPrivateTagStr() == b.variantOrPrivateTagStr()
|
||||
}
|
||||
|
||||
// isExactEquivalent returns true if canonicalizing the language will not alter
|
||||
// the script or region of a tag.
|
||||
func isExactEquivalent(l langID) bool {
|
||||
for _, o := range notEquivalent {
|
||||
if o == l {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var notEquivalent []langID
|
||||
|
||||
func init() {
|
||||
// Create a list of all languages for which canonicalization may alter the
|
||||
// script or region.
|
||||
for _, lm := range langAliasMap {
|
||||
tag := Tag{lang: langID(lm.from)}
|
||||
if tag, _ = tag.canonicalize(All); tag.script != 0 || tag.region != 0 {
|
||||
notEquivalent = append(notEquivalent, langID(lm.from))
|
||||
}
|
||||
}
|
||||
// Maximize undefined regions of paradigm locales.
|
||||
for i, v := range paradigmLocales {
|
||||
max, _ := addTags(Tag{lang: langID(v[0])})
|
||||
if v[1] == 0 {
|
||||
paradigmLocales[i][1] = uint16(max.region)
|
||||
}
|
||||
if v[2] == 0 {
|
||||
paradigmLocales[i][2] = uint16(max.region)
|
||||
}
|
||||
}
|
||||
}
|
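The matcher internals above (update, regionGroupDist, isParadigmLocale) are normally exercised through the package's exported matcher API rather than called directly. A minimal, illustrative sketch of that usage follows; the supported tags and the expected output are assumptions for the example, not part of this file.

// Sketch: exercising the matching logic above via the public API (illustrative only).
package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Locales the application supports, in order of preference.
	supported := []language.Tag{
		language.AmericanEnglish,   // en-US
		language.German,            // de
		language.SimplifiedChinese, // zh-Hans
	}
	matcher := language.NewMatcher(supported)

	// A request for Swiss German falls back to plain German; the scoring
	// and tie-breaker rules implemented in update above decide the winner.
	tag, index, confidence := matcher.Match(language.MustParse("de-CH"))
	fmt.Println(tag, index, confidence) // e.g. "de 1 High"
}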
859
vendor/golang.org/x/text/language/parse.go
generated
vendored
@ -1,859 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/tag"
|
||||
)
|
||||
|
||||
// isAlpha returns true if the byte is not a digit.
|
||||
// b must be an ASCII letter or digit.
|
||||
func isAlpha(b byte) bool {
|
||||
return b > '9'
|
||||
}
|
||||
|
||||
// isAlphaNum returns true if the string contains only ASCII letters or digits.
|
||||
func isAlphaNum(s []byte) bool {
|
||||
for _, c := range s {
|
||||
if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// errSyntax is returned by any of the parsing functions when the
|
||||
// input is not well-formed, according to BCP 47.
|
||||
// TODO: return the position at which the syntax error occurred?
|
||||
var errSyntax = errors.New("language: tag is not well-formed")
|
||||
|
||||
// ValueError is returned by any of the parsing functions when the
|
||||
// input is well-formed but the respective subtag is not recognized
|
||||
// as a valid value.
|
||||
type ValueError struct {
|
||||
v [8]byte
|
||||
}
|
||||
|
||||
func mkErrInvalid(s []byte) error {
|
||||
var e ValueError
|
||||
copy(e.v[:], s)
|
||||
return e
|
||||
}
|
||||
|
||||
func (e ValueError) tag() []byte {
|
||||
n := bytes.IndexByte(e.v[:], 0)
|
||||
if n == -1 {
|
||||
n = 8
|
||||
}
|
||||
return e.v[:n]
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e ValueError) Error() string {
|
||||
return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag())
|
||||
}
|
||||
|
||||
// Subtag returns the subtag for which the error occurred.
|
||||
func (e ValueError) Subtag() string {
|
||||
return string(e.tag())
|
||||
}
|
||||
|
||||
// scanner is used to scan BCP 47 tokens, which are separated by _ or -.
|
||||
type scanner struct {
|
||||
b []byte
|
||||
bytes [max99thPercentileSize]byte
|
||||
token []byte
|
||||
start int // start position of the current token
|
||||
end int // end position of the current token
|
||||
next int // next point for scan
|
||||
err error
|
||||
done bool
|
||||
}
|
||||
|
||||
func makeScannerString(s string) scanner {
|
||||
scan := scanner{}
|
||||
if len(s) <= len(scan.bytes) {
|
||||
scan.b = scan.bytes[:copy(scan.bytes[:], s)]
|
||||
} else {
|
||||
scan.b = []byte(s)
|
||||
}
|
||||
scan.init()
|
||||
return scan
|
||||
}
|
||||
|
||||
// makeScanner returns a scanner using b as the input buffer.
|
||||
// b is not copied and may be modified by the scanner routines.
|
||||
func makeScanner(b []byte) scanner {
|
||||
scan := scanner{b: b}
|
||||
scan.init()
|
||||
return scan
|
||||
}
|
||||
|
||||
func (s *scanner) init() {
|
||||
for i, c := range s.b {
|
||||
if c == '_' {
|
||||
s.b[i] = '-'
|
||||
}
|
||||
}
|
||||
s.scan()
|
||||
}
|
||||
|
||||
// toLower converts the string between start and end to lower case.
|
||||
func (s *scanner) toLower(start, end int) {
|
||||
for i := start; i < end; i++ {
|
||||
c := s.b[i]
|
||||
if 'A' <= c && c <= 'Z' {
|
||||
s.b[i] += 'a' - 'A'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *scanner) setError(e error) {
|
||||
if s.err == nil || (e == errSyntax && s.err != errSyntax) {
|
||||
s.err = e
|
||||
}
|
||||
}
|
||||
|
||||
// resizeRange shrinks or grows the array at position oldStart such that
|
||||
// a new string of size newSize can fit between oldStart and oldEnd.
|
||||
// Sets the scan point to after the resized range.
|
||||
func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) {
|
||||
s.start = oldStart
|
||||
if end := oldStart + newSize; end != oldEnd {
|
||||
diff := end - oldEnd
|
||||
if end < cap(s.b) {
|
||||
b := make([]byte, len(s.b)+diff)
|
||||
copy(b, s.b[:oldStart])
|
||||
copy(b[end:], s.b[oldEnd:])
|
||||
s.b = b
|
||||
} else {
|
||||
s.b = append(s.b[end:], s.b[oldEnd:]...)
|
||||
}
|
||||
s.next = end + (s.next - s.end)
|
||||
s.end = end
|
||||
}
|
||||
}
|
||||
|
||||
// replace replaces the current token with repl.
|
||||
func (s *scanner) replace(repl string) {
|
||||
s.resizeRange(s.start, s.end, len(repl))
|
||||
copy(s.b[s.start:], repl)
|
||||
}
|
||||
|
||||
// gobble removes the current token from the input.
|
||||
// Caller must call scan after calling gobble.
|
||||
func (s *scanner) gobble(e error) {
|
||||
s.setError(e)
|
||||
if s.start == 0 {
|
||||
s.b = s.b[:+copy(s.b, s.b[s.next:])]
|
||||
s.end = 0
|
||||
} else {
|
||||
s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])]
|
||||
s.end = s.start - 1
|
||||
}
|
||||
s.next = s.start
|
||||
}
|
||||
|
||||
// deleteRange removes the given range from s.b before the current token.
|
||||
func (s *scanner) deleteRange(start, end int) {
|
||||
s.setError(errSyntax)
|
||||
s.b = s.b[:start+copy(s.b[start:], s.b[end:])]
|
||||
diff := end - start
|
||||
s.next -= diff
|
||||
s.start -= diff
|
||||
s.end -= diff
|
||||
}
|
||||
|
||||
// scan parses the next token of a BCP 47 string. Tokens that are larger
|
||||
// than 8 characters or include non-alphanumeric characters result in an error
|
||||
// and are gobbled and removed from the output.
|
||||
// It returns the end position of the last token consumed.
|
||||
func (s *scanner) scan() (end int) {
|
||||
end = s.end
|
||||
s.token = nil
|
||||
for s.start = s.next; s.next < len(s.b); {
|
||||
i := bytes.IndexByte(s.b[s.next:], '-')
|
||||
if i == -1 {
|
||||
s.end = len(s.b)
|
||||
s.next = len(s.b)
|
||||
i = s.end - s.start
|
||||
} else {
|
||||
s.end = s.next + i
|
||||
s.next = s.end + 1
|
||||
}
|
||||
token := s.b[s.start:s.end]
|
||||
if i < 1 || i > 8 || !isAlphaNum(token) {
|
||||
s.gobble(errSyntax)
|
||||
continue
|
||||
}
|
||||
s.token = token
|
||||
return end
|
||||
}
|
||||
if n := len(s.b); n > 0 && s.b[n-1] == '-' {
|
||||
s.setError(errSyntax)
|
||||
s.b = s.b[:len(s.b)-1]
|
||||
}
|
||||
s.done = true
|
||||
return end
|
||||
}
|
||||
|
||||
// acceptMinSize parses multiple tokens of the given size or greater.
|
||||
// It returns the end position of the last token consumed.
|
||||
func (s *scanner) acceptMinSize(min int) (end int) {
|
||||
end = s.end
|
||||
s.scan()
|
||||
for ; len(s.token) >= min; s.scan() {
|
||||
end = s.end
|
||||
}
|
||||
return end
|
||||
}
|
||||
|
||||
// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
|
||||
// failed it returns an error and any part of the tag that could be parsed.
|
||||
// If parsing succeeded but an unknown value was found, it returns
|
||||
// ValueError. The Tag returned in this case is just stripped of the unknown
|
||||
// value. All other values are preserved. It accepts tags in the BCP 47 format
|
||||
// and extensions to this standard defined in
|
||||
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
|
||||
// The resulting tag is canonicalized using the default canonicalization type.
|
||||
func Parse(s string) (t Tag, err error) {
|
||||
return Default.Parse(s)
|
||||
}
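As a point of reference, here is a short sketch of how callers typically use Parse together with the ValueError type defined above; the tag literal and the error handling are illustrative only (imports as in the earlier matcher sketch).

// Sketch: Parse returns a usable Tag even when a subtag is unknown.
t, err := language.Parse("en-US-u-co-phonebk")
if verr, ok := err.(language.ValueError); ok {
	// Well-formed but unrecognized subtag: t is still valid with the
	// offending subtag stripped.
	fmt.Println("unknown subtag:", verr.Subtag())
} else if err != nil {
	// Not well-formed according to BCP 47.
	fmt.Println("syntax error:", err)
}
fmt.Println(t)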
|
||||
|
||||
// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
|
||||
// failed it returns an error and any part of the tag that could be parsed.
|
||||
// If parsing succeeded but an unknown value was found, it returns
|
||||
// ValueError. The Tag returned in this case is just stripped of the unknown
|
||||
// value. All other values are preserved. It accepts tags in the BCP 47 format
|
||||
// and extensions to this standard defined in
|
||||
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
|
||||
// The resulting tag is canonicalized using the canonicalization type c.
|
||||
func (c CanonType) Parse(s string) (t Tag, err error) {
|
||||
// TODO: consider supporting old-style locale key-value pairs.
|
||||
if s == "" {
|
||||
return und, errSyntax
|
||||
}
|
||||
if len(s) <= maxAltTaglen {
|
||||
b := [maxAltTaglen]byte{}
|
||||
for i, c := range s {
|
||||
// Generating invalid UTF-8 is okay as it won't match.
|
||||
if 'A' <= c && c <= 'Z' {
|
||||
c += 'a' - 'A'
|
||||
} else if c == '_' {
|
||||
c = '-'
|
||||
}
|
||||
b[i] = byte(c)
|
||||
}
|
||||
if t, ok := grandfathered(b); ok {
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
scan := makeScannerString(s)
|
||||
t, err = parse(&scan, s)
|
||||
t, changed := t.canonicalize(c)
|
||||
if changed {
|
||||
t.remakeString()
|
||||
}
|
||||
return t, err
|
||||
}
|
||||
|
||||
func parse(scan *scanner, s string) (t Tag, err error) {
|
||||
t = und
|
||||
var end int
|
||||
if n := len(scan.token); n <= 1 {
|
||||
scan.toLower(0, len(scan.b))
|
||||
if n == 0 || scan.token[0] != 'x' {
|
||||
return t, errSyntax
|
||||
}
|
||||
end = parseExtensions(scan)
|
||||
} else if n >= 4 {
|
||||
return und, errSyntax
|
||||
} else { // the usual case
|
||||
t, end = parseTag(scan)
|
||||
if n := len(scan.token); n == 1 {
|
||||
t.pExt = uint16(end)
|
||||
end = parseExtensions(scan)
|
||||
} else if end < len(scan.b) {
|
||||
scan.setError(errSyntax)
|
||||
scan.b = scan.b[:end]
|
||||
}
|
||||
}
|
||||
if int(t.pVariant) < len(scan.b) {
|
||||
if end < len(s) {
|
||||
s = s[:end]
|
||||
}
|
||||
if len(s) > 0 && tag.Compare(s, scan.b) == 0 {
|
||||
t.str = s
|
||||
} else {
|
||||
t.str = string(scan.b)
|
||||
}
|
||||
} else {
|
||||
t.pVariant, t.pExt = 0, 0
|
||||
}
|
||||
return t, scan.err
|
||||
}
|
||||
|
||||
// parseTag parses language, script, region and variants.
|
||||
// It returns a Tag and the end position in the input that was parsed.
|
||||
func parseTag(scan *scanner) (t Tag, end int) {
|
||||
var e error
|
||||
// TODO: set an error if an unknown lang, script or region is encountered.
|
||||
t.lang, e = getLangID(scan.token)
|
||||
scan.setError(e)
|
||||
scan.replace(t.lang.String())
|
||||
langStart := scan.start
|
||||
end = scan.scan()
|
||||
for len(scan.token) == 3 && isAlpha(scan.token[0]) {
|
||||
// From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
|
||||
// to a tag of the form <extlang>.
|
||||
lang, e := getLangID(scan.token)
|
||||
if lang != 0 {
|
||||
t.lang = lang
|
||||
copy(scan.b[langStart:], lang.String())
|
||||
scan.b[langStart+3] = '-'
|
||||
scan.start = langStart + 4
|
||||
}
|
||||
scan.gobble(e)
|
||||
end = scan.scan()
|
||||
}
|
||||
if len(scan.token) == 4 && isAlpha(scan.token[0]) {
|
||||
t.script, e = getScriptID(script, scan.token)
|
||||
if t.script == 0 {
|
||||
scan.gobble(e)
|
||||
}
|
||||
end = scan.scan()
|
||||
}
|
||||
if n := len(scan.token); n >= 2 && n <= 3 {
|
||||
t.region, e = getRegionID(scan.token)
|
||||
if t.region == 0 {
|
||||
scan.gobble(e)
|
||||
} else {
|
||||
scan.replace(t.region.String())
|
||||
}
|
||||
end = scan.scan()
|
||||
}
|
||||
scan.toLower(scan.start, len(scan.b))
|
||||
t.pVariant = byte(end)
|
||||
end = parseVariants(scan, end, t)
|
||||
t.pExt = uint16(end)
|
||||
return t, end
|
||||
}
|
||||
|
||||
var separator = []byte{'-'}
|
||||
|
||||
// parseVariants scans tokens as long as each token is a valid variant string.
|
||||
// Duplicate variants are removed.
|
||||
func parseVariants(scan *scanner, end int, t Tag) int {
|
||||
start := scan.start
|
||||
varIDBuf := [4]uint8{}
|
||||
variantBuf := [4][]byte{}
|
||||
varID := varIDBuf[:0]
|
||||
variant := variantBuf[:0]
|
||||
last := -1
|
||||
needSort := false
|
||||
for ; len(scan.token) >= 4; scan.scan() {
|
||||
// TODO: measure the impact of needing this conversion and redesign
|
||||
// the data structure if there is an issue.
|
||||
v, ok := variantIndex[string(scan.token)]
|
||||
if !ok {
|
||||
// unknown variant
|
||||
// TODO: allow user-defined variants?
|
||||
scan.gobble(mkErrInvalid(scan.token))
|
||||
continue
|
||||
}
|
||||
varID = append(varID, v)
|
||||
variant = append(variant, scan.token)
|
||||
if !needSort {
|
||||
if last < int(v) {
|
||||
last = int(v)
|
||||
} else {
|
||||
needSort = true
|
||||
// There are no legal combinations of more than 7 variants
|
||||
// (and this is by no means a useful sequence).
|
||||
const maxVariants = 8
|
||||
if len(varID) > maxVariants {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
end = scan.end
|
||||
}
|
||||
if needSort {
|
||||
sort.Sort(variantsSort{varID, variant})
|
||||
k, l := 0, -1
|
||||
for i, v := range varID {
|
||||
w := int(v)
|
||||
if l == w {
|
||||
// Remove duplicates.
|
||||
continue
|
||||
}
|
||||
varID[k] = varID[i]
|
||||
variant[k] = variant[i]
|
||||
k++
|
||||
l = w
|
||||
}
|
||||
if str := bytes.Join(variant[:k], separator); len(str) == 0 {
|
||||
end = start - 1
|
||||
} else {
|
||||
scan.resizeRange(start, end, len(str))
|
||||
copy(scan.b[scan.start:], str)
|
||||
end = scan.end
|
||||
}
|
||||
}
|
||||
return end
|
||||
}
|
||||
|
||||
type variantsSort struct {
|
||||
i []uint8
|
||||
v [][]byte
|
||||
}
|
||||
|
||||
func (s variantsSort) Len() int {
|
||||
return len(s.i)
|
||||
}
|
||||
|
||||
func (s variantsSort) Swap(i, j int) {
|
||||
s.i[i], s.i[j] = s.i[j], s.i[i]
|
||||
s.v[i], s.v[j] = s.v[j], s.v[i]
|
||||
}
|
||||
|
||||
func (s variantsSort) Less(i, j int) bool {
|
||||
return s.i[i] < s.i[j]
|
||||
}
|
||||
|
||||
type bytesSort [][]byte
|
||||
|
||||
func (b bytesSort) Len() int {
|
||||
return len(b)
|
||||
}
|
||||
|
||||
func (b bytesSort) Swap(i, j int) {
|
||||
b[i], b[j] = b[j], b[i]
|
||||
}
|
||||
|
||||
func (b bytesSort) Less(i, j int) bool {
|
||||
return bytes.Compare(b[i], b[j]) == -1
|
||||
}
|
||||
|
||||
// parseExtensions parses and normalizes the extensions in the buffer.
|
||||
// It returns the last position of scan.b that is part of any extension.
|
||||
// It also trims scan.b to remove excess parts accordingly.
|
||||
func parseExtensions(scan *scanner) int {
|
||||
start := scan.start
|
||||
exts := [][]byte{}
|
||||
private := []byte{}
|
||||
end := scan.end
|
||||
for len(scan.token) == 1 {
|
||||
extStart := scan.start
|
||||
ext := scan.token[0]
|
||||
end = parseExtension(scan)
|
||||
extension := scan.b[extStart:end]
|
||||
if len(extension) < 3 || (ext != 'x' && len(extension) < 4) {
|
||||
scan.setError(errSyntax)
|
||||
end = extStart
|
||||
continue
|
||||
} else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) {
|
||||
scan.b = scan.b[:end]
|
||||
return end
|
||||
} else if ext == 'x' {
|
||||
private = extension
|
||||
break
|
||||
}
|
||||
exts = append(exts, extension)
|
||||
}
|
||||
sort.Sort(bytesSort(exts))
|
||||
if len(private) > 0 {
|
||||
exts = append(exts, private)
|
||||
}
|
||||
scan.b = scan.b[:start]
|
||||
if len(exts) > 0 {
|
||||
scan.b = append(scan.b, bytes.Join(exts, separator)...)
|
||||
} else if start > 0 {
|
||||
// Strip trailing '-'.
|
||||
scan.b = scan.b[:start-1]
|
||||
}
|
||||
return end
|
||||
}
|
||||
|
||||
// parseExtension parses a single extension and returns the position of
|
||||
// the extension end.
|
||||
func parseExtension(scan *scanner) int {
|
||||
start, end := scan.start, scan.end
|
||||
switch scan.token[0] {
|
||||
case 'u':
|
||||
attrStart := end
|
||||
scan.scan()
|
||||
for last := []byte{}; len(scan.token) > 2; scan.scan() {
|
||||
if bytes.Compare(scan.token, last) != -1 {
|
||||
// Attributes are unsorted. Start over from scratch.
|
||||
p := attrStart + 1
|
||||
scan.next = p
|
||||
attrs := [][]byte{}
|
||||
for scan.scan(); len(scan.token) > 2; scan.scan() {
|
||||
attrs = append(attrs, scan.token)
|
||||
end = scan.end
|
||||
}
|
||||
sort.Sort(bytesSort(attrs))
|
||||
copy(scan.b[p:], bytes.Join(attrs, separator))
|
||||
break
|
||||
}
|
||||
last = scan.token
|
||||
end = scan.end
|
||||
}
|
||||
var last, key []byte
|
||||
for attrEnd := end; len(scan.token) == 2; last = key {
|
||||
key = scan.token
|
||||
keyEnd := scan.end
|
||||
end = scan.acceptMinSize(3)
|
||||
// TODO: check key value validity
|
||||
if keyEnd == end || bytes.Compare(key, last) != 1 {
|
||||
// We have an invalid key or the keys are not sorted.
|
||||
// Start scanning keys from scratch and reorder.
|
||||
p := attrEnd + 1
|
||||
scan.next = p
|
||||
keys := [][]byte{}
|
||||
for scan.scan(); len(scan.token) == 2; {
|
||||
keyStart, keyEnd := scan.start, scan.end
|
||||
end = scan.acceptMinSize(3)
|
||||
if keyEnd != end {
|
||||
keys = append(keys, scan.b[keyStart:end])
|
||||
} else {
|
||||
scan.setError(errSyntax)
|
||||
end = keyStart
|
||||
}
|
||||
}
|
||||
sort.Sort(bytesSort(keys))
|
||||
reordered := bytes.Join(keys, separator)
|
||||
if e := p + len(reordered); e < end {
|
||||
scan.deleteRange(e, end)
|
||||
end = e
|
||||
}
|
||||
copy(scan.b[p:], bytes.Join(keys, separator))
|
||||
break
|
||||
}
|
||||
}
|
||||
case 't':
|
||||
scan.scan()
|
||||
if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
|
||||
_, end = parseTag(scan)
|
||||
scan.toLower(start, end)
|
||||
}
|
||||
for len(scan.token) == 2 && !isAlpha(scan.token[1]) {
|
||||
end = scan.acceptMinSize(3)
|
||||
}
|
||||
case 'x':
|
||||
end = scan.acceptMinSize(1)
|
||||
default:
|
||||
end = scan.acceptMinSize(2)
|
||||
}
|
||||
return end
|
||||
}
|
||||
|
||||
// Compose creates a Tag from individual parts, which may be of type Tag, Base,
|
||||
// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
|
||||
// Base, Script or Region or slice of type Variant or Extension is passed more
|
||||
// than once, the latter will overwrite the former. Variants and Extensions are
|
||||
// accumulated, but if two extensions of the same type are passed, the latter
|
||||
// will replace the former. A Tag overwrites all former values and typically
|
||||
// only makes sense as the first argument. The resulting tag is returned after
|
||||
// canonicalizing using the Default CanonType. If one or more errors are
|
||||
// encountered, one of the errors is returned.
|
||||
func Compose(part ...interface{}) (t Tag, err error) {
|
||||
return Default.Compose(part...)
|
||||
}
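A brief sketch of composing a tag from parts, illustrating the overwrite and accumulate behavior described in the comment above; the subtag values are arbitrary examples (imports as before).

// Sketch: building sr-Latn-RS from individual parts.
base, _ := language.ParseBase("sr")
script, _ := language.ParseScript("Latn")
region, _ := language.ParseRegion("RS")

t, err := language.Compose(base, script, region)
fmt.Println(t, err) // sr-Latn-RS <nil>

// Passing a Region twice keeps the latter one, as documented above.
t, _ = language.Compose(base, region, language.MustParseRegion("BA"))
fmt.Println(t) // sr-BA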
|
||||
|
||||
// Compose creates a Tag from individual parts, which may be of type Tag, Base,
|
||||
// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
|
||||
// Base, Script or Region or slice of type Variant or Extension is passed more
|
||||
// than once, the latter will overwrite the former. Variants and Extensions are
|
||||
// accumulated, but if two extensions of the same type are passed, the latter
|
||||
// will replace the former. A Tag overwrites all former values and typically
|
||||
// only makes sense as the first argument. The resulting tag is returned after
|
||||
// canonicalizing using CanonType c. If one or more errors are encountered,
|
||||
// one of the errors is returned.
|
||||
func (c CanonType) Compose(part ...interface{}) (t Tag, err error) {
|
||||
var b builder
|
||||
if err = b.update(part...); err != nil {
|
||||
return und, err
|
||||
}
|
||||
t, _ = b.tag.canonicalize(c)
|
||||
|
||||
if len(b.ext) > 0 || len(b.variant) > 0 {
|
||||
sort.Sort(sortVariant(b.variant))
|
||||
sort.Strings(b.ext)
|
||||
if b.private != "" {
|
||||
b.ext = append(b.ext, b.private)
|
||||
}
|
||||
n := maxCoreSize + tokenLen(b.variant...) + tokenLen(b.ext...)
|
||||
buf := make([]byte, n)
|
||||
p := t.genCoreBytes(buf)
|
||||
t.pVariant = byte(p)
|
||||
p += appendTokens(buf[p:], b.variant...)
|
||||
t.pExt = uint16(p)
|
||||
p += appendTokens(buf[p:], b.ext...)
|
||||
t.str = string(buf[:p])
|
||||
} else if b.private != "" {
|
||||
t.str = b.private
|
||||
t.remakeString()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type builder struct {
|
||||
tag Tag
|
||||
|
||||
private string // the x extension
|
||||
ext []string
|
||||
variant []string
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
func (b *builder) addExt(e string) {
|
||||
if e == "" {
|
||||
} else if e[0] == 'x' {
|
||||
b.private = e
|
||||
} else {
|
||||
b.ext = append(b.ext, e)
|
||||
}
|
||||
}
|
||||
|
||||
var errInvalidArgument = errors.New("invalid Extension or Variant")
|
||||
|
||||
func (b *builder) update(part ...interface{}) (err error) {
|
||||
replace := func(l *[]string, s string, eq func(a, b string) bool) bool {
|
||||
if s == "" {
|
||||
b.err = errInvalidArgument
|
||||
return true
|
||||
}
|
||||
for i, v := range *l {
|
||||
if eq(v, s) {
|
||||
(*l)[i] = s
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
for _, x := range part {
|
||||
switch v := x.(type) {
|
||||
case Tag:
|
||||
b.tag.lang = v.lang
|
||||
b.tag.region = v.region
|
||||
b.tag.script = v.script
|
||||
if v.str != "" {
|
||||
b.variant = nil
|
||||
for x, s := "", v.str[v.pVariant:v.pExt]; s != ""; {
|
||||
x, s = nextToken(s)
|
||||
b.variant = append(b.variant, x)
|
||||
}
|
||||
b.ext, b.private = nil, ""
|
||||
for i, e := int(v.pExt), ""; i < len(v.str); {
|
||||
i, e = getExtension(v.str, i)
|
||||
b.addExt(e)
|
||||
}
|
||||
}
|
||||
case Base:
|
||||
b.tag.lang = v.langID
|
||||
case Script:
|
||||
b.tag.script = v.scriptID
|
||||
case Region:
|
||||
b.tag.region = v.regionID
|
||||
case Variant:
|
||||
if !replace(&b.variant, v.variant, func(a, b string) bool { return a == b }) {
|
||||
b.variant = append(b.variant, v.variant)
|
||||
}
|
||||
case Extension:
|
||||
if !replace(&b.ext, v.s, func(a, b string) bool { return a[0] == b[0] }) {
|
||||
b.addExt(v.s)
|
||||
}
|
||||
case []Variant:
|
||||
b.variant = nil
|
||||
for _, x := range v {
|
||||
b.update(x)
|
||||
}
|
||||
case []Extension:
|
||||
b.ext, b.private = nil, ""
|
||||
for _, e := range v {
|
||||
b.update(e)
|
||||
}
|
||||
// TODO: support parsing of raw strings based on morphology or just extensions?
|
||||
case error:
|
||||
err = v
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func tokenLen(token ...string) (n int) {
|
||||
for _, t := range token {
|
||||
n += len(t) + 1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func appendTokens(b []byte, token ...string) int {
|
||||
p := 0
|
||||
for _, t := range token {
|
||||
b[p] = '-'
|
||||
copy(b[p+1:], t)
|
||||
p += 1 + len(t)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
type sortVariant []string
|
||||
|
||||
func (s sortVariant) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s sortVariant) Swap(i, j int) {
|
||||
s[j], s[i] = s[i], s[j]
|
||||
}
|
||||
|
||||
func (s sortVariant) Less(i, j int) bool {
|
||||
return variantIndex[s[i]] < variantIndex[s[j]]
|
||||
}
|
||||
|
||||
func findExt(list []string, x byte) int {
|
||||
for i, e := range list {
|
||||
if e[0] == x {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// getExtension returns the name, body and end position of the extension.
|
||||
func getExtension(s string, p int) (end int, ext string) {
|
||||
if s[p] == '-' {
|
||||
p++
|
||||
}
|
||||
if s[p] == 'x' {
|
||||
return len(s), s[p:]
|
||||
}
|
||||
end = nextExtension(s, p)
|
||||
return end, s[p:end]
|
||||
}
|
||||
|
||||
// nextExtension finds the next extension within the string, searching
|
||||
// for the -<char>- pattern from position p.
|
||||
// In the vast majority of cases, language tags will have at most
|
||||
// one extension and extensions tend to be small.
|
||||
func nextExtension(s string, p int) int {
|
||||
for n := len(s) - 3; p < n; {
|
||||
if s[p] == '-' {
|
||||
if s[p+2] == '-' {
|
||||
return p
|
||||
}
|
||||
p += 3
|
||||
} else {
|
||||
p++
|
||||
}
|
||||
}
|
||||
return len(s)
|
||||
}
|
||||
|
||||
var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight")
|
||||
|
||||
// ParseAcceptLanguage parses the contents of an Accept-Language header as
|
||||
// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and
|
||||
// a list of corresponding quality weights. It is more permissive than RFC 2616
|
||||
// and may return non-nil slices even if the input is not valid.
|
||||
// The Tags will be sorted by highest weight first and then by first occurrence.
|
||||
// Tags with a weight of zero will be dropped. An error will be returned if the
|
||||
// input could not be parsed.
|
||||
func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) {
|
||||
var entry string
|
||||
for s != "" {
|
||||
if entry, s = split(s, ','); entry == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
entry, weight := split(entry, ';')
|
||||
|
||||
// Scan the language.
|
||||
t, err := Parse(entry)
|
||||
if err != nil {
|
||||
id, ok := acceptFallback[entry]
|
||||
if !ok {
|
||||
return nil, nil, err
|
||||
}
|
||||
t = Tag{lang: id}
|
||||
}
|
||||
|
||||
// Scan the optional weight.
|
||||
w := 1.0
|
||||
if weight != "" {
|
||||
weight = consume(weight, 'q')
|
||||
weight = consume(weight, '=')
|
||||
// consume returns the empty string when a token could not be
|
||||
// consumed, resulting in an error for ParseFloat.
|
||||
if w, err = strconv.ParseFloat(weight, 32); err != nil {
|
||||
return nil, nil, errInvalidWeight
|
||||
}
|
||||
// Drop tags with a quality weight of 0.
|
||||
if w <= 0 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
tag = append(tag, t)
|
||||
q = append(q, float32(w))
|
||||
}
|
||||
sortStable(&tagSort{tag, q})
|
||||
return tag, q, nil
|
||||
}
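An illustrative sketch of feeding an Accept-Language header through the function above; the header value is made up (imports as before).

// Sketch: ranking the tags of an Accept-Language header by quality weight.
tags, weights, err := language.ParseAcceptLanguage("fr-CH, fr;q=0.9, en;q=0.8, *;q=0.5")
if err == nil {
	for i, t := range tags {
		fmt.Printf("%v q=%.1f\n", t, weights[i])
	}
}
// Output is ordered by weight: fr-CH 1.0, fr 0.9, en 0.8, mul 0.5.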
|
||||
|
||||
// consume removes a leading token c from s and returns the result or the empty
|
||||
// string if there is no such token.
|
||||
func consume(s string, c byte) string {
|
||||
if s == "" || s[0] != c {
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(s[1:])
|
||||
}
|
||||
|
||||
func split(s string, c byte) (head, tail string) {
|
||||
if i := strings.IndexByte(s, c); i >= 0 {
|
||||
return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:])
|
||||
}
|
||||
return strings.TrimSpace(s), ""
|
||||
}
|
||||
|
||||
// Add hack mapping to deal with a small number of cases that occur
|
||||
// in Accept-Language (with reasonable frequency).
|
||||
var acceptFallback = map[string]langID{
|
||||
"english": _en,
|
||||
"deutsch": _de,
|
||||
"italian": _it,
|
||||
"french": _fr,
|
||||
"*": _mul, // defined in the spec to match all languages.
|
||||
}
|
||||
|
||||
type tagSort struct {
|
||||
tag []Tag
|
||||
q []float32
|
||||
}
|
||||
|
||||
func (s *tagSort) Len() int {
|
||||
return len(s.q)
|
||||
}
|
||||
|
||||
func (s *tagSort) Less(i, j int) bool {
|
||||
return s.q[i] > s.q[j]
|
||||
}
|
||||
|
||||
func (s *tagSort) Swap(i, j int) {
|
||||
s.tag[i], s.tag[j] = s.tag[j], s.tag[i]
|
||||
s.q[i], s.q[j] = s.q[j], s.q[i]
|
||||
}
|
3686
vendor/golang.org/x/text/language/tables.go
generated
vendored
File diff suppressed because it is too large
143
vendor/golang.org/x/text/language/tags.go
generated
vendored
@ -1,143 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
// TODO: Various sets of commonly used tags and regions.
|
||||
|
||||
// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
|
||||
// It simplifies safe initialization of Tag values.
|
||||
func MustParse(s string) Tag {
|
||||
t, err := Parse(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
|
||||
// It simplifies safe initialization of Tag values.
|
||||
func (c CanonType) MustParse(s string) Tag {
|
||||
t, err := c.Parse(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// MustParseBase is like ParseBase, but panics if the given base cannot be parsed.
|
||||
// It simplifies safe initialization of Base values.
|
||||
func MustParseBase(s string) Base {
|
||||
b, err := ParseBase(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MustParseScript is like ParseScript, but panics if the given script cannot be
|
||||
// parsed. It simplifies safe initialization of Script values.
|
||||
func MustParseScript(s string) Script {
|
||||
scr, err := ParseScript(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return scr
|
||||
}
|
||||
|
||||
// MustParseRegion is like ParseRegion, but panics if the given region cannot be
|
||||
// parsed. It simplifies safe initialization of Region values.
|
||||
func MustParseRegion(s string) Region {
|
||||
r, err := ParseRegion(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return r
|
||||
}
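The Must* helpers above are intended for package-level initialization with values known to be valid; a tiny illustrative sketch:

// Sketch: static initialization with known-good values panics only on typos.
var (
	defaultTag    = language.MustParse("en-GB")
	defaultRegion = language.MustParseRegion("GB")
	defaultScript = language.MustParseScript("Latn")
)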
|
||||
|
||||
var (
|
||||
und = Tag{}
|
||||
|
||||
Und Tag = Tag{}
|
||||
|
||||
Afrikaans Tag = Tag{lang: _af} // af
|
||||
Amharic Tag = Tag{lang: _am} // am
|
||||
Arabic Tag = Tag{lang: _ar} // ar
|
||||
ModernStandardArabic Tag = Tag{lang: _ar, region: _001} // ar-001
|
||||
Azerbaijani Tag = Tag{lang: _az} // az
|
||||
Bulgarian Tag = Tag{lang: _bg} // bg
|
||||
Bengali Tag = Tag{lang: _bn} // bn
|
||||
Catalan Tag = Tag{lang: _ca} // ca
|
||||
Czech Tag = Tag{lang: _cs} // cs
|
||||
Danish Tag = Tag{lang: _da} // da
|
||||
German Tag = Tag{lang: _de} // de
|
||||
Greek Tag = Tag{lang: _el} // el
|
||||
English Tag = Tag{lang: _en} // en
|
||||
AmericanEnglish Tag = Tag{lang: _en, region: _US} // en-US
|
||||
BritishEnglish Tag = Tag{lang: _en, region: _GB} // en-GB
|
||||
Spanish Tag = Tag{lang: _es} // es
|
||||
EuropeanSpanish Tag = Tag{lang: _es, region: _ES} // es-ES
|
||||
LatinAmericanSpanish Tag = Tag{lang: _es, region: _419} // es-419
|
||||
Estonian Tag = Tag{lang: _et} // et
|
||||
Persian Tag = Tag{lang: _fa} // fa
|
||||
Finnish Tag = Tag{lang: _fi} // fi
|
||||
Filipino Tag = Tag{lang: _fil} // fil
|
||||
French Tag = Tag{lang: _fr} // fr
|
||||
CanadianFrench Tag = Tag{lang: _fr, region: _CA} // fr-CA
|
||||
Gujarati Tag = Tag{lang: _gu} // gu
|
||||
Hebrew Tag = Tag{lang: _he} // he
|
||||
Hindi Tag = Tag{lang: _hi} // hi
|
||||
Croatian Tag = Tag{lang: _hr} // hr
|
||||
Hungarian Tag = Tag{lang: _hu} // hu
|
||||
Armenian Tag = Tag{lang: _hy} // hy
|
||||
Indonesian Tag = Tag{lang: _id} // id
|
||||
Icelandic Tag = Tag{lang: _is} // is
|
||||
Italian Tag = Tag{lang: _it} // it
|
||||
Japanese Tag = Tag{lang: _ja} // ja
|
||||
Georgian Tag = Tag{lang: _ka} // ka
|
||||
Kazakh Tag = Tag{lang: _kk} // kk
|
||||
Khmer Tag = Tag{lang: _km} // km
|
||||
Kannada Tag = Tag{lang: _kn} // kn
|
||||
Korean Tag = Tag{lang: _ko} // ko
|
||||
Kirghiz Tag = Tag{lang: _ky} // ky
|
||||
Lao Tag = Tag{lang: _lo} // lo
|
||||
Lithuanian Tag = Tag{lang: _lt} // lt
|
||||
Latvian Tag = Tag{lang: _lv} // lv
|
||||
Macedonian Tag = Tag{lang: _mk} // mk
|
||||
Malayalam Tag = Tag{lang: _ml} // ml
|
||||
Mongolian Tag = Tag{lang: _mn} // mn
|
||||
Marathi Tag = Tag{lang: _mr} // mr
|
||||
Malay Tag = Tag{lang: _ms} // ms
|
||||
Burmese Tag = Tag{lang: _my} // my
|
||||
Nepali Tag = Tag{lang: _ne} // ne
|
||||
Dutch Tag = Tag{lang: _nl} // nl
|
||||
Norwegian Tag = Tag{lang: _no} // no
|
||||
Punjabi Tag = Tag{lang: _pa} // pa
|
||||
Polish Tag = Tag{lang: _pl} // pl
|
||||
Portuguese Tag = Tag{lang: _pt} // pt
|
||||
BrazilianPortuguese Tag = Tag{lang: _pt, region: _BR} // pt-BR
|
||||
EuropeanPortuguese Tag = Tag{lang: _pt, region: _PT} // pt-PT
|
||||
Romanian Tag = Tag{lang: _ro} // ro
|
||||
Russian Tag = Tag{lang: _ru} // ru
|
||||
Sinhala Tag = Tag{lang: _si} // si
|
||||
Slovak Tag = Tag{lang: _sk} // sk
|
||||
Slovenian Tag = Tag{lang: _sl} // sl
|
||||
Albanian Tag = Tag{lang: _sq} // sq
|
||||
Serbian Tag = Tag{lang: _sr} // sr
|
||||
SerbianLatin Tag = Tag{lang: _sr, script: _Latn} // sr-Latn
|
||||
Swedish Tag = Tag{lang: _sv} // sv
|
||||
Swahili Tag = Tag{lang: _sw} // sw
|
||||
Tamil Tag = Tag{lang: _ta} // ta
|
||||
Telugu Tag = Tag{lang: _te} // te
|
||||
Thai Tag = Tag{lang: _th} // th
|
||||
Turkish Tag = Tag{lang: _tr} // tr
|
||||
Ukrainian Tag = Tag{lang: _uk} // uk
|
||||
Urdu Tag = Tag{lang: _ur} // ur
|
||||
Uzbek Tag = Tag{lang: _uz} // uz
|
||||
Vietnamese Tag = Tag{lang: _vi} // vi
|
||||
Chinese Tag = Tag{lang: _zh} // zh
|
||||
SimplifiedChinese Tag = Tag{lang: _zh, script: _Hans} // zh-Hans
|
||||
TraditionalChinese Tag = Tag{lang: _zh, script: _Hant} // zh-Hant
|
||||
Zulu Tag = Tag{lang: _zu} // zu
|
||||
)
|
105
vendor/golang.org/x/text/unicode/cldr/base.go
generated
vendored
@ -1,105 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Elem is implemented by every XML element.
|
||||
type Elem interface {
|
||||
setEnclosing(Elem)
|
||||
setName(string)
|
||||
enclosing() Elem
|
||||
|
||||
GetCommon() *Common
|
||||
}
|
||||
|
||||
type hidden struct {
|
||||
CharData string `xml:",chardata"`
|
||||
Alias *struct {
|
||||
Common
|
||||
Source string `xml:"source,attr"`
|
||||
Path string `xml:"path,attr"`
|
||||
} `xml:"alias"`
|
||||
Def *struct {
|
||||
Common
|
||||
Choice string `xml:"choice,attr,omitempty"`
|
||||
Type string `xml:"type,attr,omitempty"`
|
||||
} `xml:"default"`
|
||||
}
|
||||
|
||||
// Common holds several of the most common attributes and sub elements
|
||||
// of an XML element.
|
||||
type Common struct {
|
||||
XMLName xml.Name
|
||||
name string
|
||||
enclElem Elem
|
||||
Type string `xml:"type,attr,omitempty"`
|
||||
Reference string `xml:"reference,attr,omitempty"`
|
||||
Alt string `xml:"alt,attr,omitempty"`
|
||||
ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
|
||||
Draft string `xml:"draft,attr,omitempty"`
|
||||
hidden
|
||||
}
|
||||
|
||||
// Default returns the default type to select from the enclosed list
|
||||
// or "" if no default value is specified.
|
||||
func (e *Common) Default() string {
|
||||
if e.Def == nil {
|
||||
return ""
|
||||
}
|
||||
if e.Def.Choice != "" {
|
||||
return e.Def.Choice
|
||||
} else if e.Def.Type != "" {
|
||||
// Type is still used by the default element in collation.
|
||||
return e.Def.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Element returns the XML element name.
|
||||
func (e *Common) Element() string {
|
||||
return e.name
|
||||
}
|
||||
|
||||
// GetCommon returns e. It is provided such that Common implements Elem.
|
||||
func (e *Common) GetCommon() *Common {
|
||||
return e
|
||||
}
|
||||
|
||||
// Data returns the character data accumulated for this element.
|
||||
func (e *Common) Data() string {
|
||||
e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
|
||||
return e.CharData
|
||||
}
|
||||
|
||||
func (e *Common) setName(s string) {
|
||||
e.name = s
|
||||
}
|
||||
|
||||
func (e *Common) enclosing() Elem {
|
||||
return e.enclElem
|
||||
}
|
||||
|
||||
func (e *Common) setEnclosing(en Elem) {
|
||||
e.enclElem = en
|
||||
}
|
||||
|
||||
// Escape characters that can be escaped without further escaping the string.
|
||||
var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
|
||||
|
||||
// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string.
|
||||
// It assumes the input string is correctly formatted.
|
||||
func replaceUnicode(s string) string {
|
||||
if s[1] == '#' {
|
||||
r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
|
||||
return string(r)
|
||||
}
|
||||
r, _, _, _ := strconv.UnquoteChar(s, 0)
|
||||
return string(r)
|
||||
}
|
130
vendor/golang.org/x/text/unicode/cldr/cldr.go
generated
vendored
@ -1,130 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run makexml.go -output xml.go
|
||||
|
||||
// Package cldr provides a parser for LDML and related XML formats.
|
||||
// This package is intended to be used by the table generation tools
|
||||
// for the various internationalization-related packages.
|
||||
// As the XML types are generated from the CLDR DTD, and as the CLDR standard
|
||||
// is periodically amended, this package may change considerably over time.
|
||||
// This mostly means that data may appear and disappear between versions.
|
||||
// That is, old code should keep compiling for newer versions, but data
|
||||
// may have moved or changed.
|
||||
// CLDR version 22 is the first version supported by this package.
|
||||
// Older versions may not work.
|
||||
package cldr // import "golang.org/x/text/unicode/cldr"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
|
||||
type CLDR struct {
|
||||
parent map[string][]string
|
||||
locale map[string]*LDML
|
||||
resolved map[string]*LDML
|
||||
bcp47 *LDMLBCP47
|
||||
supp *SupplementalData
|
||||
}
|
||||
|
||||
func makeCLDR() *CLDR {
|
||||
return &CLDR{
|
||||
parent: make(map[string][]string),
|
||||
locale: make(map[string]*LDML),
|
||||
resolved: make(map[string]*LDML),
|
||||
bcp47: &LDMLBCP47{},
|
||||
supp: &SupplementalData{},
|
||||
}
|
||||
}
|
||||
|
||||
// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
|
||||
func (cldr *CLDR) BCP47() *LDMLBCP47 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Draft indicates the draft level of an element.
|
||||
type Draft int
|
||||
|
||||
const (
|
||||
Approved Draft = iota
|
||||
Contributed
|
||||
Provisional
|
||||
Unconfirmed
|
||||
)
|
||||
|
||||
var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}
|
||||
|
||||
// ParseDraft returns the Draft value corresponding to the given string. The
|
||||
// empty string corresponds to Approved.
|
||||
func ParseDraft(level string) (Draft, error) {
|
||||
if level == "" {
|
||||
return Approved, nil
|
||||
}
|
||||
for i, s := range drafts {
|
||||
if level == s {
|
||||
return Unconfirmed - Draft(i), nil
|
||||
}
|
||||
}
|
||||
return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
|
||||
}
|
||||
|
||||
func (d Draft) String() string {
|
||||
return drafts[len(drafts)-1-int(d)]
|
||||
}
|
||||
|
||||
// SetDraftLevel sets which draft levels to include in the evaluated LDML.
|
||||
// Any draft element for which the draft level is higher than lev will be excluded.
|
||||
// If multiple draft levels are available for a single element, the one with the
|
||||
// lowest draft level will be selected, unless preferDraft is true, in which case
|
||||
// the highest draft will be chosen.
|
||||
// It is assumed that the underlying LDML is canonicalized.
|
||||
func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
|
||||
// TODO: implement
|
||||
cldr.resolved = make(map[string]*LDML)
|
||||
}
|
||||
|
||||
// RawLDML returns the LDML XML for loc in unresolved form.
|
||||
// loc must be one of the strings returned by Locales.
|
||||
func (cldr *CLDR) RawLDML(loc string) *LDML {
|
||||
return cldr.locale[loc]
|
||||
}
|
||||
|
||||
// LDML returns the fully resolved LDML XML for loc, which must be one of
|
||||
// the strings returned by Locales.
|
||||
func (cldr *CLDR) LDML(loc string) (*LDML, error) {
|
||||
return cldr.resolve(loc)
|
||||
}
|
||||
|
||||
// Supplemental returns the parsed supplemental data. If no such data was parsed,
|
||||
// nil is returned.
|
||||
func (cldr *CLDR) Supplemental() *SupplementalData {
|
||||
return cldr.supp
|
||||
}
|
||||
|
||||
// Locales returns the locales for which there exist files.
|
||||
// Valid sublocales for which there is no file are not included.
|
||||
// The root locale is always sorted first.
|
||||
func (cldr *CLDR) Locales() []string {
|
||||
loc := []string{"root"}
|
||||
hasRoot := false
|
||||
for l, _ := range cldr.locale {
|
||||
if l == "root" {
|
||||
hasRoot = true
|
||||
continue
|
||||
}
|
||||
loc = append(loc, l)
|
||||
}
|
||||
sort.Strings(loc[1:])
|
||||
if !hasRoot {
|
||||
return loc[1:]
|
||||
}
|
||||
return loc
|
||||
}
|
||||
|
||||
// Get fills in the fields of x based on the XPath path.
|
||||
func Get(e Elem, path string) (res Elem, err error) {
|
||||
return walkXPath(e, path)
|
||||
}
|
359
vendor/golang.org/x/text/unicode/cldr/collate.go
generated
vendored
@ -1,359 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// RuleProcessor can be passed to Collator's Process method, which
|
||||
// parses the rules and calls the respective method for each rule found.
|
||||
type RuleProcessor interface {
|
||||
Reset(anchor string, before int) error
|
||||
Insert(level int, str, context, extend string) error
|
||||
Index(id string)
|
||||
}
|
||||
|
||||
const (
|
||||
// cldrIndex is a Unicode-reserved sentinel value used to mark the start
|
||||
// of a grouping within an index.
|
||||
// We ignore any rule that starts with this rune.
|
||||
// See http://unicode.org/reports/tr35/#Collation_Elements for details.
|
||||
cldrIndex = "\uFDD0"
|
||||
|
||||
// specialAnchor is the format in which to represent logical reset positions,
|
||||
// such as "first tertiary ignorable".
|
||||
specialAnchor = "<%s/>"
|
||||
)
|
||||
|
||||
// Process parses the rules for the tailorings of this collation
|
||||
// and calls the respective methods of p for each rule found.
|
||||
func (c Collation) Process(p RuleProcessor) (err error) {
|
||||
if len(c.Cr) > 0 {
|
||||
if len(c.Cr) > 1 {
|
||||
return fmt.Errorf("multiple cr elements, want 0 or 1")
|
||||
}
|
||||
return processRules(p, c.Cr[0].Data())
|
||||
}
|
||||
if c.Rules.Any != nil {
|
||||
return c.processXML(p)
|
||||
}
|
||||
return errors.New("no tailoring data")
|
||||
}
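Process is driven through the RuleProcessor interface declared above; the following is a minimal, illustrative implementation that only prints the rules it is handed (the type name and output format are invented for the example).

// Sketch: a RuleProcessor that logs every tailoring rule.
type printRules struct{}

func (printRules) Reset(anchor string, before int) error {
	fmt.Printf("reset to %q (before level %d)\n", anchor, before)
	return nil
}

func (printRules) Insert(level int, str, context, extend string) error {
	fmt.Printf("level %d: insert %q (context %q, extend %q)\n", level, str, context, extend)
	return nil
}

func (printRules) Index(id string) {
	fmt.Printf("index %q\n", id)
}

// Usage (c is a cldr.Collation from a parsed locale): err := c.Process(printRules{})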
|
||||
|
||||
// processRules parses rules in the Collation Rule Syntax defined in
|
||||
// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
|
||||
func processRules(p RuleProcessor, s string) (err error) {
|
||||
chk := func(s string, e error) string {
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
return s
|
||||
}
|
||||
i := 0 // Save the line number for use after the loop.
|
||||
scanner := bufio.NewScanner(strings.NewReader(s))
|
||||
for ; scanner.Scan() && err == nil; i++ {
|
||||
for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
|
||||
level := 5
|
||||
var ch byte
|
||||
switch ch, s = s[0], s[1:]; ch {
|
||||
case '&': // followed by <anchor> or '[' <key> ']'
|
||||
if s = skipSpace(s); consume(&s, '[') {
|
||||
s = chk(parseSpecialAnchor(p, s))
|
||||
} else {
|
||||
s = chk(parseAnchor(p, 0, s))
|
||||
}
|
||||
case '<': // sort relation '<'{1,4}, optionally followed by '*'.
|
||||
for level = 1; consume(&s, '<'); level++ {
|
||||
}
|
||||
if level > 4 {
|
||||
err = fmt.Errorf("level %d > 4", level)
|
||||
}
|
||||
fallthrough
|
||||
case '=': // identity relation, optionally followed by *.
|
||||
if consume(&s, '*') {
|
||||
s = chk(parseSequence(p, level, s))
|
||||
} else {
|
||||
s = chk(parseOrder(p, level, s))
|
||||
}
|
||||
default:
|
||||
chk("", fmt.Errorf("illegal operator %q", ch))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if chk("", scanner.Err()); err != nil {
|
||||
return fmt.Errorf("%d: %v", i, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseSpecialAnchor parses the anchor syntax which is either of the form
|
||||
// ['before' <level>] <anchor>
|
||||
// or
|
||||
// [<label>]
|
||||
// The starting '[' should already be consumed.
|
||||
func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) {
|
||||
i := strings.IndexByte(s, ']')
|
||||
if i == -1 {
|
||||
return "", errors.New("unmatched bracket")
|
||||
}
|
||||
a := strings.TrimSpace(s[:i])
|
||||
s = s[i+1:]
|
||||
if strings.HasPrefix(a, "before ") {
|
||||
l, err := strconv.ParseUint(skipSpace(a[len("before "):]), 10, 3)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
return parseAnchor(p, int(l), s)
|
||||
}
|
||||
return s, p.Reset(fmt.Sprintf(specialAnchor, a), 0)
|
||||
}
|
||||
|
||||
func parseAnchor(p RuleProcessor, level int, s string) (tail string, err error) {
|
||||
anchor, s, err := scanString(s)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
return s, p.Reset(anchor, level)
|
||||
}
|
||||
|
||||
func parseOrder(p RuleProcessor, level int, s string) (tail string, err error) {
|
||||
var value, context, extend string
|
||||
if value, s, err = scanString(s); err != nil {
|
||||
return s, err
|
||||
}
|
||||
if strings.HasPrefix(value, cldrIndex) {
|
||||
p.Index(value[len(cldrIndex):])
|
||||
return
|
||||
}
|
||||
if consume(&s, '|') {
|
||||
if context, s, err = scanString(s); err != nil {
|
||||
return s, errors.New("missing string after context")
|
||||
}
|
||||
}
|
||||
if consume(&s, '/') {
|
||||
if extend, s, err = scanString(s); err != nil {
|
||||
return s, errors.New("missing string after extension")
|
||||
}
|
||||
}
|
||||
return s, p.Insert(level, value, context, extend)
|
||||
}
|
||||
|
||||
// scanString scans a single input string.
|
||||
func scanString(s string) (str, tail string, err error) {
|
||||
if s = skipSpace(s); s == "" {
|
||||
return s, s, errors.New("missing string")
|
||||
}
|
||||
buf := [16]byte{} // small but enough to hold most cases.
|
||||
value := buf[:0]
|
||||
for s != "" {
|
||||
if consume(&s, '\'') {
|
||||
i := strings.IndexByte(s, '\'')
|
||||
if i == -1 {
|
||||
return "", "", errors.New(`unmatched single quote`)
|
||||
}
|
||||
if i == 0 {
|
||||
value = append(value, '\'')
|
||||
} else {
|
||||
value = append(value, s[:i]...)
|
||||
}
|
||||
s = s[i+1:]
|
||||
continue
|
||||
}
|
||||
r, sz := utf8.DecodeRuneInString(s)
|
||||
if unicode.IsSpace(r) || strings.ContainsRune("&<=#", r) {
|
||||
break
|
||||
}
|
||||
value = append(value, s[:sz]...)
|
||||
s = s[sz:]
|
||||
}
|
||||
return string(value), skipSpace(s), nil
|
||||
}
|
||||
|
||||
func parseSequence(p RuleProcessor, level int, s string) (tail string, err error) {
|
||||
if s = skipSpace(s); s == "" {
|
||||
return s, errors.New("empty sequence")
|
||||
}
|
||||
last := rune(0)
|
||||
for s != "" {
|
||||
r, sz := utf8.DecodeRuneInString(s)
|
||||
s = s[sz:]
|
||||
|
||||
if r == '-' {
|
||||
// We have a range. The first element was already written.
|
||||
if last == 0 {
|
||||
return s, errors.New("range without starter value")
|
||||
}
|
||||
r, sz = utf8.DecodeRuneInString(s)
|
||||
s = s[sz:]
|
||||
if r == utf8.RuneError || r < last {
|
||||
return s, fmt.Errorf("invalid range %q-%q", last, r)
|
||||
}
|
||||
for i := last + 1; i <= r; i++ {
|
||||
if err := p.Insert(level, string(i), "", ""); err != nil {
|
||||
return s, err
|
||||
}
|
||||
}
|
||||
last = 0
|
||||
continue
|
||||
}
|
||||
|
||||
if unicode.IsSpace(r) || unicode.IsPunct(r) {
|
||||
break
|
||||
}
|
||||
|
||||
// normal case
|
||||
if err := p.Insert(level, string(r), "", ""); err != nil {
|
||||
return s, err
|
||||
}
|
||||
last = r
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func skipSpace(s string) string {
|
||||
return strings.TrimLeftFunc(s, unicode.IsSpace)
|
||||
}
|
||||
|
||||
// consume reports whether the next byte is ch. If so, it gobbles it by
|
||||
// updating s.
|
||||
func consume(s *string, ch byte) (ok bool) {
|
||||
if *s == "" || (*s)[0] != ch {
|
||||
return false
|
||||
}
|
||||
*s = (*s)[1:]
|
||||
return true
|
||||
}
|
||||
|
||||
// The following code parses Collation rules of CLDR version 24 and before.
|
||||
|
||||
var lmap = map[byte]int{
|
||||
'p': 1,
|
||||
's': 2,
|
||||
't': 3,
|
||||
'i': 5,
|
||||
}
|
||||
|
||||
type rulesElem struct {
|
||||
Rules struct {
|
||||
Common
|
||||
Any []*struct {
|
||||
XMLName xml.Name
|
||||
rule
|
||||
} `xml:",any"`
|
||||
} `xml:"rules"`
|
||||
}
|
||||
|
||||
type rule struct {
|
||||
Value string `xml:",chardata"`
|
||||
Before string `xml:"before,attr"`
|
||||
Any []*struct {
|
||||
XMLName xml.Name
|
||||
rule
|
||||
} `xml:",any"`
|
||||
}
|
||||
|
||||
var emptyValueError = errors.New("cldr: empty rule value")
|
||||
|
||||
func (r *rule) value() (string, error) {
|
||||
// Convert hexadecimal Unicode codepoint notation to a string.
|
||||
s := charRe.ReplaceAllStringFunc(r.Value, replaceUnicode)
|
||||
r.Value = s
|
||||
if s == "" {
|
||||
if len(r.Any) != 1 {
|
||||
return "", emptyValueError
|
||||
}
|
||||
r.Value = fmt.Sprintf(specialAnchor, r.Any[0].XMLName.Local)
|
||||
r.Any = nil
|
||||
} else if len(r.Any) != 0 {
|
||||
return "", fmt.Errorf("cldr: XML elements found in collation rule: %v", r.Any)
|
||||
}
|
||||
return r.Value, nil
|
||||
}
|
||||
|
||||
func (r rule) process(p RuleProcessor, name, context, extend string) error {
|
||||
v, err := r.value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch name {
|
||||
case "p", "s", "t", "i":
|
||||
if strings.HasPrefix(v, cldrIndex) {
|
||||
p.Index(v[len(cldrIndex):])
|
||||
return nil
|
||||
}
|
||||
if err := p.Insert(lmap[name[0]], v, context, extend); err != nil {
|
||||
return err
|
||||
}
|
||||
case "pc", "sc", "tc", "ic":
|
||||
level := lmap[name[0]]
|
||||
for _, s := range v {
|
||||
if err := p.Insert(level, string(s), context, extend); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("cldr: unsupported tag: %q", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processXML parses the format of CLDR versions 24 and older.
|
||||
func (c Collation) processXML(p RuleProcessor) (err error) {
|
||||
// Collation is generated and defined in xml.go.
|
||||
var v string
|
||||
for _, r := range c.Rules.Any {
|
||||
switch r.XMLName.Local {
|
||||
case "reset":
|
||||
level := 0
|
||||
switch r.Before {
|
||||
case "primary", "1":
|
||||
level = 1
|
||||
case "secondary", "2":
|
||||
level = 2
|
||||
case "tertiary", "3":
|
||||
level = 3
|
||||
case "":
|
||||
default:
|
||||
return fmt.Errorf("cldr: unknown level %q", r.Before)
|
||||
}
|
||||
v, err = r.value()
|
||||
if err == nil {
|
||||
err = p.Reset(v, level)
|
||||
}
|
||||
case "x":
|
||||
var context, extend string
|
||||
for _, r1 := range r.Any {
|
||||
v, err = r1.value()
|
||||
switch r1.XMLName.Local {
|
||||
case "context":
|
||||
context = v
|
||||
case "extend":
|
||||
extend = v
|
||||
}
|
||||
}
|
||||
for _, r1 := range r.Any {
|
||||
if t := r1.XMLName.Local; t == "context" || t == "extend" {
|
||||
continue
|
||||
}
|
||||
r1.rule.process(p, r1.XMLName.Local, context, extend)
|
||||
}
|
||||
default:
|
||||
err = r.rule.process(p, r.XMLName.Local, "", "")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
171
vendor/golang.org/x/text/unicode/cldr/decode.go
generated
vendored
@ -1,171 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// A Decoder loads an archive of CLDR data.
|
||||
type Decoder struct {
|
||||
dirFilter []string
|
||||
sectionFilter []string
|
||||
loader Loader
|
||||
cldr *CLDR
|
||||
curLocale string
|
||||
}
|
||||
|
||||
// SetSectionFilter takes a list of top-level LDML element names to which
|
||||
// evaluation of LDML should be limited. It automatically calls SetDirFilter.
|
||||
func (d *Decoder) SetSectionFilter(filter ...string) {
|
||||
d.sectionFilter = filter
|
||||
// TODO: automatically set dir filter
|
||||
}
|
||||
|
||||
// SetDirFilter limits the loading of LDML XML files of the specified directories.
|
||||
// Note that sections may be split across directories differently for different CLDR versions.
|
||||
// For more robust code, use SetSectionFilter.
|
||||
func (d *Decoder) SetDirFilter(dir ...string) {
|
||||
d.dirFilter = dir
|
||||
}
|
||||
|
||||
// A Loader provides access to the files of a CLDR archive.
|
||||
type Loader interface {
|
||||
Len() int
|
||||
Path(i int) string
|
||||
Reader(i int) (io.ReadCloser, error)
|
||||
}

var fileRe = regexp.MustCompile(`.*[/\\](.*)[/\\](.*)\.xml`)

// Decode loads and decodes the files represented by l.
func (d *Decoder) Decode(l Loader) (cldr *CLDR, err error) {
	d.cldr = makeCLDR()
	for i := 0; i < l.Len(); i++ {
		fname := l.Path(i)
		if m := fileRe.FindStringSubmatch(fname); m != nil {
			if len(d.dirFilter) > 0 && !in(d.dirFilter, m[1]) {
				continue
			}
			var r io.Reader
			if r, err = l.Reader(i); err == nil {
				err = d.decode(m[1], m[2], r)
			}
			if err != nil {
				return nil, err
			}
		}
	}
	d.cldr.finalize(d.sectionFilter)
	return d.cldr, nil
}

func (d *Decoder) decode(dir, id string, r io.Reader) error {
	var v interface{}
	var l *LDML
	cldr := d.cldr
	switch {
	case dir == "supplemental":
		v = cldr.supp
	case dir == "transforms":
		return nil
	case dir == "bcp47":
		v = cldr.bcp47
	case dir == "validity":
		return nil
	default:
		ok := false
		if v, ok = cldr.locale[id]; !ok {
			l = &LDML{}
			v, cldr.locale[id] = l, l
		}
	}
	x := xml.NewDecoder(r)
	if err := x.Decode(v); err != nil {
		log.Printf("%s/%s: %v", dir, id, err)
		return err
	}
	if l != nil {
		if l.Identity == nil {
			return fmt.Errorf("%s/%s: missing identity element", dir, id)
		}
		// TODO: verify when CLDR bug http://unicode.org/cldr/trac/ticket/8970
		// is resolved.
		// path := strings.Split(id, "_")
		// if lang := l.Identity.Language.Type; lang != path[0] {
		// 	return fmt.Errorf("%s/%s: language was %s; want %s", dir, id, lang, path[0])
		// }
	}
	return nil
}

type pathLoader []string

func makePathLoader(path string) (pl pathLoader, err error) {
	err = filepath.Walk(path, func(path string, _ os.FileInfo, err error) error {
		pl = append(pl, path)
		return err
	})
	return pl, err
}

func (pl pathLoader) Len() int {
	return len(pl)
}

func (pl pathLoader) Path(i int) string {
	return pl[i]
}

func (pl pathLoader) Reader(i int) (io.ReadCloser, error) {
	return os.Open(pl[i])
}

// DecodePath loads CLDR data from the given path.
func (d *Decoder) DecodePath(path string) (cldr *CLDR, err error) {
	loader, err := makePathLoader(path)
	if err != nil {
		return nil, err
	}
	return d.Decode(loader)
}

type zipLoader struct {
	r *zip.Reader
}

func (zl zipLoader) Len() int {
	return len(zl.r.File)
}

func (zl zipLoader) Path(i int) string {
	return zl.r.File[i].Name
}

func (zl zipLoader) Reader(i int) (io.ReadCloser, error) {
	return zl.r.File[i].Open()
}

// DecodeZip loads CLDR data from the zip archive for which r is the source.
func (d *Decoder) DecodeZip(r io.Reader) (cldr *CLDR, err error) {
	buffer, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
	if err != nil {
		return nil, err
	}
	return d.Decode(zipLoader{archive})
}
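
A minimal usage sketch (again not part of the vendored file), assuming an unpacked CLDR release in a hypothetical local directory named cldr-data; the filter values are illustrative and depend on which sections the caller needs. For a core.zip downloaded from unicode.org, DecodeZip could be used instead.

package main

import (
	"log"

	"golang.org/x/text/unicode/cldr"
)

func main() {
	var d cldr.Decoder
	// Restrict evaluation to the numbers section; SetDirFilter additionally
	// limits which directories of the archive are read at all.
	d.SetSectionFilter("numbers")
	d.SetDirFilter("main")

	// "cldr-data" is a made-up path to an unpacked CLDR release.
	data, err := d.DecodePath("cldr-data")
	if err != nil {
		log.Fatalf("decoding CLDR data: %v", err)
	}
	_ = data // *cldr.CLDR, ready for inspection
}
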
400
vendor/golang.org/x/text/unicode/cldr/makexml.go
generated
vendored
@ -1,400 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

// This tool generates types for the various XML formats of CLDR.
package main

import (
	"archive/zip"
	"bytes"
	"encoding/xml"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"regexp"
	"strings"

	"golang.org/x/text/internal/gen"
)

var outputFile = flag.String("output", "xml.go", "output file name")

func main() {
	flag.Parse()

	r := gen.OpenCLDRCoreZip()
	buffer, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal("Could not read zip file")
	}
	r.Close()
	z, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
	if err != nil {
		log.Fatalf("Could not read zip archive: %v", err)
	}

	var buf bytes.Buffer

	version := gen.CLDRVersion()

	for _, dtd := range files {
		for _, f := range z.File {
			if strings.HasSuffix(f.Name, dtd.file+".dtd") {
				r, err := f.Open()
				failOnError(err)

				b := makeBuilder(&buf, dtd)
				b.parseDTD(r)
				b.resolve(b.index[dtd.top[0]])
				b.write()
				if b.version != "" && version != b.version {
					println(f.Name)
					log.Fatalf("main: inconsistent versions: found %s; want %s", b.version, version)
				}
				break
			}
		}
	}
	fmt.Fprintln(&buf, "// Version is the version of CLDR from which the XML definitions are generated.")
	fmt.Fprintf(&buf, "const Version = %q\n", version)

	gen.WriteGoFile(*outputFile, "cldr", buf.Bytes())
}

func failOnError(err error) {
	if err != nil {
		log.New(os.Stderr, "", log.Lshortfile).Output(2, err.Error())
		os.Exit(1)
	}
}

// configuration data per DTD type
type dtd struct {
	file string   // base file name
	root string   // Go name of the root XML element
	top  []string // create a different type for this section

	skipElem    []string // hard-coded or deprecated elements
	skipAttr    []string // attributes to exclude
	predefined  []string // elements for which a hard-coded <name>Elem type already exists
	forceRepeat []string // elements to make slices despite DTD
}

var files = []dtd{
	{
		file: "ldmlBCP47",
		root: "LDMLBCP47",
		top:  []string{"ldmlBCP47"},
		skipElem: []string{
			"cldrVersion", // deprecated, not used
		},
	},
	{
		file: "ldmlSupplemental",
		root: "SupplementalData",
		top:  []string{"supplementalData"},
		skipElem: []string{
			"cldrVersion", // deprecated, not used
		},
		forceRepeat: []string{
			"plurals", // data defined in plurals.xml and ordinals.xml
		},
	},
	{
		file: "ldml",
		root: "LDML",
		top: []string{
			"ldml", "collation", "calendar", "timeZoneNames", "localeDisplayNames", "numbers",
		},
		skipElem: []string{
			"cp",       // not used anywhere
			"special",  // not used anywhere
			"fallback", // deprecated, not used
			"alias",    // in Common
			"default",  // in Common
		},
		skipAttr: []string{
			"hiraganaQuarternary", // typo in DTD, correct version included as well
		},
		predefined: []string{"rules"},
	},
}

var comments = map[string]string{
	"ldmlBCP47": `
// LDMLBCP47 holds information on allowable values for various variables in LDML.
`,
	"supplementalData": `
// SupplementalData holds information relevant for internationalization
// and proper use of CLDR, but that is not contained in the locale hierarchy.
`,
	"ldml": `
// LDML is the top-level type for locale-specific data.
`,
	"collation": `
// Collation contains rules that specify a certain sort-order,
// as a tailoring of the root order.
// The parsed rules are obtained by passing a RuleProcessor to Collation's
// Process method.
`,
	"calendar": `
// Calendar specifies the fields used for formatting and parsing dates and times.
// The month and quarter names are identified numerically, starting at 1.
// The day (of the week) names are identified with short strings, since there is
// no universally-accepted numeric designation.
`,
	"dates": `
// Dates contains information regarding the format and parsing of dates and times.
`,
	"localeDisplayNames": `
// LocaleDisplayNames specifies localized display names for scripts, languages,
// countries, currencies, and variants.
`,
	"numbers": `
// Numbers supplies information for formatting and parsing numbers and currencies.
`,
}

type element struct {
	name      string // XML element name
	category  string // elements contained by this element
	signature string // category + attrKey*

	attr []*attribute // attributes supported by this element.
	sub  []struct {   // parsed and evaluated sub elements of this element.
		e      *element
		repeat bool // true if the element needs to be a slice
	}

	resolved bool // prevent multiple resolutions of this element.
}

type attribute struct {
	name string
	key  string
	list []string

	tag string // Go tag
}

var (
	reHead  = regexp.MustCompile(` *(\w+) +([\w\-]+)`)
	reAttr  = regexp.MustCompile(` *(\w+) *(?:(\w+)|\(([\w\- \|]+)\)) *(?:#([A-Z]*) *(?:\"([\.\d+])\")?)? *("[\w\-:]*")?`)
	reElem  = regexp.MustCompile(`^ *(EMPTY|ANY|\(.*\)[\*\+\?]?) *$`)
	reToken = regexp.MustCompile(`\w\-`)
)
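
For orientation (not part of the vendored file), a self-contained sketch of how reHead and reElem split an ELEMENT directive; the directive text is made up but follows the <!ELEMENT name (...)> shape of the CLDR DTDs, and the regexps are copied verbatim from above.

package main

import (
	"fmt"
	"regexp"
)

var (
	reHead = regexp.MustCompile(` *(\w+) +([\w\-]+)`)
	reElem = regexp.MustCompile(`^ *(EMPTY|ANY|\(.*\)[\*\+\?]?) *$`)
)

func main() {
	// encoding/xml hands parseDTD the directive body without the "<!" and ">".
	dir := []byte(` ELEMENT ldml (identity, localeDisplayNames?, numbers?) `)
	m := reHead.FindSubmatch(dir)
	fmt.Printf("kind=%s name=%s\n", m[1], m[2]) // kind=ELEMENT name=ldml
	rest := dir[len(m[0]):]
	fmt.Printf("category=%s\n", reElem.FindSubmatch(rest)[1])
}
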

// builder is used to read in the DTD files from CLDR and generate Go code
// to be used with the encoding/xml package.
type builder struct {
	w       io.Writer
	index   map[string]*element
	elem    []*element
	info    dtd
	version string
}

func makeBuilder(w io.Writer, d dtd) builder {
	return builder{
		w:     w,
		index: make(map[string]*element),
		elem:  []*element{},
		info:  d,
	}
}

// parseDTD parses a DTD file.
func (b *builder) parseDTD(r io.Reader) {
	for d := xml.NewDecoder(r); ; {
		t, err := d.Token()
		if t == nil {
			break
		}
		failOnError(err)
		dir, ok := t.(xml.Directive)
		if !ok {
			continue
		}
		m := reHead.FindSubmatch(dir)
		dir = dir[len(m[0]):]
		ename := string(m[2])
		el, elementFound := b.index[ename]
		switch string(m[1]) {
		case "ELEMENT":
			if elementFound {
				log.Fatalf("parseDTD: duplicate entry for element %q", ename)
			}
			m := reElem.FindSubmatch(dir)
			if m == nil {
				log.Fatalf("parseDTD: invalid element %q", string(dir))
			}
			if len(m[0]) != len(dir) {
				log.Fatalf("parseDTD: invalid element %q (%d, %d, %q)", string(dir), len(dir), len(m[0]), string(m[0]))
			}
			s := string(m[1])
			el = &element{
				name:     ename,
				category: s,
			}
			b.index[ename] = el
		case "ATTLIST":
			if !elementFound {
				log.Fatalf("parseDTD: unknown element %q", ename)
			}
			s := string(dir)
			m := reAttr.FindStringSubmatch(s)
			if m == nil {
				log.Fatal(fmt.Errorf("parseDTD: invalid attribute %q", string(dir)))
			}
			if m[4] == "FIXED" {
				b.version = m[5]
			} else {
				switch m[1] {
				case "draft", "references", "alt", "validSubLocales", "standard" /* in Common */ :
				case "type", "choice":
				default:
					el.attr = append(el.attr, &attribute{
						name: m[1],
						key:  s,
						list: reToken.FindAllString(m[3], -1),
					})
					el.signature = fmt.Sprintf("%s=%s+%s", el.signature, m[1], m[2])
				}
			}
		}
	}
}

var reCat = regexp.MustCompile(`[ ,\|]*(?:(\(|\)|\#?[\w_-]+)([\*\+\?]?))?`)

// resolve takes a parsed element and converts it into structured data
// that can be used to generate the XML code.
func (b *builder) resolve(e *element) {
	if e.resolved {
		return
	}
	b.elem = append(b.elem, e)
	e.resolved = true
	s := e.category
	found := make(map[string]bool)
	sequenceStart := []int{}
	for len(s) > 0 {
		m := reCat.FindStringSubmatch(s)
		if m == nil {
			log.Fatalf("%s: invalid category string %q", e.name, s)
		}
		repeat := m[2] == "*" || m[2] == "+" || in(b.info.forceRepeat, m[1])
		switch m[1] {
		case "":
		case "(":
			sequenceStart = append(sequenceStart, len(e.sub))
		case ")":
			if len(sequenceStart) == 0 {
				log.Fatalf("%s: unmatched closing parenthesis", e.name)
			}
			for i := sequenceStart[len(sequenceStart)-1]; i < len(e.sub); i++ {
				e.sub[i].repeat = e.sub[i].repeat || repeat
			}
			sequenceStart = sequenceStart[:len(sequenceStart)-1]
		default:
			if in(b.info.skipElem, m[1]) {
			} else if sub, ok := b.index[m[1]]; ok {
				if !found[sub.name] {
					e.sub = append(e.sub, struct {
						e      *element
						repeat bool
					}{sub, repeat})
					found[sub.name] = true
					b.resolve(sub)
				}
			} else if m[1] == "#PCDATA" || m[1] == "ANY" {
			} else if m[1] != "EMPTY" {
				log.Fatalf("resolve:%s: element %q not found", e.name, m[1])
			}
		}
		s = s[len(m[0]):]
	}
}

// in returns true if s is contained in set.
func in(set []string, s string) bool {
	for _, v := range set {
		if v == s {
			return true
		}
	}
	return false
}

var repl = strings.NewReplacer("-", " ", "_", " ")

// title puts the first character and each character following '-' or '_' in
// title case and removes all occurrences of '-' and '_'.
func title(s string) string {
	return strings.Replace(strings.Title(repl.Replace(s)), " ", "", -1)
}
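
A quick illustration (not part of the vendored file) of the name mapping title performs, using local copies of repl and title from above:

package main

import (
	"fmt"
	"strings"
)

var repl = strings.NewReplacer("-", " ", "_", " ")

func title(s string) string {
	return strings.Replace(strings.Title(repl.Replace(s)), " ", "", -1)
}

func main() {
	fmt.Println(title("localeDisplayNames")) // LocaleDisplayNames
	fmt.Println(title("territory-codes"))    // TerritoryCodes
	fmt.Println(title("day_period"))         // DayPeriod
}
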

// writeElem generates Go code for a single element, recursively.
func (b *builder) writeElem(tab int, e *element) {
	p := func(f string, x ...interface{}) {
		f = strings.Replace(f, "\n", "\n"+strings.Repeat("\t", tab), -1)
		fmt.Fprintf(b.w, f, x...)
	}
	if len(e.sub) == 0 && len(e.attr) == 0 {
		p("Common")
		return
	}
	p("struct {")
	tab++
	p("\nCommon")
	for _, attr := range e.attr {
		if !in(b.info.skipAttr, attr.name) {
			p("\n%s string `xml:\"%s,attr\"`", title(attr.name), attr.name)
		}
	}
	for _, sub := range e.sub {
		if in(b.info.predefined, sub.e.name) {
			p("\n%sElem", sub.e.name)
			continue
		}
		if in(b.info.skipElem, sub.e.name) {
			continue
		}
		p("\n%s ", title(sub.e.name))
		if sub.repeat {
			p("[]")
		}
		p("*")
		if in(b.info.top, sub.e.name) {
			p(title(sub.e.name))
		} else {
			b.writeElem(tab, sub.e)
		}
		p(" `xml:\"%s\"`", sub.e.name)
	}
	tab--
	p("\n}")
}

// write generates the Go XML code.
func (b *builder) write() {
	for i, name := range b.info.top {
		e := b.index[name]
		if e != nil {
			fmt.Fprintf(b.w, comments[name])
			name := title(e.name)
			if i == 0 {
				name = b.info.root
			}
			fmt.Fprintf(b.w, "type %s ", name)
			b.writeElem(0, e)
			fmt.Fprint(b.w, "\n")
		}
	}
}
Some files were not shown because too many files have changed in this diff.