Compare commits
master ... updatedepz
1 commit

Author | SHA1 | Date
---|---|---
 | 47ba8669d9 |
.github/workflows/development.yml (vendored): 8 changes
@@ -16,7 +16,7 @@ jobs:
   test-build-upload:
     strategy:
       matrix:
-        go-version: [1.20.x]
+        go-version: [1.21.x]
         platform: [ubuntu-latest]
     runs-on: ${{ matrix.platform }}
     steps:
@@ -39,19 +39,19 @@ jobs:
           CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -ldflags "-s -X github.com/42wim/matterbridge/version.GitHash=$(git log --pretty=format:'%h' -n 1)" -o output/win/matterbridge-$VERSION-windows-amd64.exe
           CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags "-s -X github.com/42wim/matterbridge/version.GitHash=$(git log --pretty=format:'%h' -n 1)" -o output/mac/matterbridge-$VERSION-darwin-amd64
       - name: Upload linux 64-bit
-        if: startsWith(matrix.go-version,'1.20')
+        if: startsWith(matrix.go-version,'1.21')
         uses: actions/upload-artifact@v3
         with:
           name: matterbridge-linux-64bit
           path: output/lin
       - name: Upload windows 64-bit
-        if: startsWith(matrix.go-version,'1.20')
+        if: startsWith(matrix.go-version,'1.21')
         uses: actions/upload-artifact@v3
         with:
           name: matterbridge-windows-64bit
           path: output/win
       - name: Upload darwin 64-bit
-        if: startsWith(matrix.go-version,'1.20')
+        if: startsWith(matrix.go-version,'1.21')
         uses: actions/upload-artifact@v3
         with:
           name: matterbridge-darwin-64bit
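The Go bump above touches two coupled places in the workflow: the strategy matrix that selects the toolchain, and the startsWith guards on the artifact-upload steps, which only fire for the matrix entry they name. A minimal sketch of that pattern follows; the trigger, checkout and setup-go steps are assumptions, since they sit outside the hunks shown:

    on: [push, pull_request]        # assumed trigger; not part of the diff above
    jobs:
      test-build-upload:
        strategy:
          matrix:
            go-version: [1.21.x]
            platform: [ubuntu-latest]
        runs-on: ${{ matrix.platform }}
        steps:
          - uses: actions/checkout@v4   # assumed; not part of the diff above
          - uses: actions/setup-go@v4   # assumed; not part of the diff above
            with:
              go-version: ${{ matrix.go-version }}
          # The guard repeats the matrix version string, so a toolchain bump has to
          # update both the matrix entry and every startsWith() condition.
          - name: Upload linux 64-bit
            if: startsWith(matrix.go-version,'1.21')
            uses: actions/upload-artifact@v3
            with:
              name: matterbridge-linux-64bit
              path: output/lin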
@@ -212,6 +212,7 @@ linters:
     - execinquery
     - nosnakecase
     - exhaustive
+    - testifylint
 # rules to deal with reported isues
 issues:
   # List of regexps of issue texts to exclude, empty list by default.
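The linters:/issues: keys place this hunk in the project's golangci-lint configuration (conventionally .golangci.yml; the file name is an assumption, since only the hunk is shown). Assuming the usual enable-all/disable layout, the new testifylint entry keeps that recently introduced linter switched off so a golangci-lint upgrade does not add new warnings. A hedged sketch:

    linters:
      enable-all: true        # assumed; not visible in the hunk above
      disable:                # assumed key; only the list entries appear in the hunk
        - execinquery
        - nosnakecase
        - exhaustive
        - testifylint         # the entry added by this commit

    issues:
      # List of regexps of issue texts to exclude, empty list by default.
      exclude: []             # assumed placeholder for the list the comment describes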
go.mod: 76 changes
@@ -5,21 +5,21 @@ require (
 github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f
 github.com/Philipp15b/go-steam v1.0.1-0.20200727090957-6ae9b3c0a560
 github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c
-github.com/SevereCloud/vksdk/v2 v2.16.0
+github.com/SevereCloud/vksdk/v2 v2.16.1
 github.com/bwmarrin/discordgo v0.27.1
 github.com/d5/tengo/v2 v2.16.1
-github.com/davecgh/go-spew v1.1.1
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
-github.com/fsnotify/fsnotify v1.6.0
+github.com/fsnotify/fsnotify v1.7.0
-github.com/gomarkdown/markdown v0.0.0-20230716120725-531d2d74bc12
+github.com/gomarkdown/markdown v0.0.0-20231222211730-1d6d20845b47
 github.com/google/gops v0.3.27
-github.com/gorilla/schema v1.2.0
+github.com/gorilla/schema v1.2.1
 github.com/harmony-development/shibshib v0.0.0-20220101224523-c98059d09cfa
 github.com/hashicorp/golang-lru v0.6.0
 github.com/jpillora/backoff v1.0.0
 github.com/keybase/go-keybase-chat-bot v0.0.0-20221220212439-e48d9abd2c20
 github.com/kyokomi/emoji/v2 v2.2.12
-github.com/labstack/echo/v4 v4.11.1
+github.com/labstack/echo/v4 v4.11.4
-github.com/lrstanley/girc v0.0.0-20230729130341-dd5853a5f1a6
+github.com/lrstanley/girc v0.0.0-20230911164840-f47717952bf9
 github.com/matterbridge/Rocket.Chat.Go.SDK v0.0.0-20211016222428-79310a412696
 github.com/matterbridge/go-xmpp v0.0.0-20211030125215-791a06c5f1be
 github.com/matterbridge/gomatrix v0.0.0-20220411225302-271e5088ea27
@@ -39,21 +39,21 @@ require (
 github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
 github.com/shazow/ssh-chat v1.10.1
 github.com/sirupsen/logrus v1.9.3
-github.com/slack-go/slack v0.12.2
+github.com/slack-go/slack v0.12.3
-github.com/spf13/viper v1.16.0
+github.com/spf13/viper v1.18.2
 github.com/stretchr/testify v1.8.4
 github.com/vincent-petithory/dataurl v1.0.0
 github.com/writeas/go-strip-markdown v2.0.1+incompatible
 github.com/yaegashi/msgraph.go v0.1.4
 github.com/zfjagann/golang-ring v0.0.0-20220330170733-19bcea1b6289
-go.mau.fi/whatsmeow v0.0.0-20230805111647-405414b9b5c0
+go.mau.fi/whatsmeow v0.0.0-20240106083317-757651400d8d
-golang.org/x/image v0.11.0
+golang.org/x/image v0.15.0
-golang.org/x/oauth2 v0.11.0
+golang.org/x/oauth2 v0.15.0
-golang.org/x/text v0.12.0
+golang.org/x/text v0.14.0
 gomod.garykim.dev/nc-talk v0.3.0
 google.golang.org/protobuf v1.31.0
 layeh.com/gumble v0.0.0-20221205141517-d1df60a3cc14
-modernc.org/sqlite v1.25.0
+modernc.org/sqlite v1.28.0
 )

 require (
@@ -69,7 +69,7 @@ require (
 github.com/go-asn1-ber/asn1-ber v1.5.3 // indirect
 github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
 github.com/golang/protobuf v1.5.3 // indirect
-github.com/google/uuid v1.3.0 // indirect
+github.com/google/uuid v1.4.0 // indirect
 github.com/gopackage/ddp v0.0.3 // indirect
 github.com/gorilla/websocket v1.5.0 // indirect
 github.com/graph-gophers/graphql-go v1.3.0 // indirect
@@ -77,15 +77,15 @@ require (
 github.com/json-iterator/go v1.1.12 // indirect
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
 github.com/kettek/apng v0.0.0-20191108220231-414630eed80f // indirect
-github.com/klauspost/compress v1.16.0 // indirect
+github.com/klauspost/compress v1.17.0 // indirect
 github.com/klauspost/cpuid/v2 v2.2.3 // indirect
-github.com/labstack/gommon v0.4.0 // indirect
+github.com/labstack/gommon v0.4.2 // indirect
 github.com/magiconair/properties v1.8.7 // indirect
 github.com/mattermost/go-i18n v1.11.1-0.20211013152124-5c415071e404 // indirect
 github.com/mattermost/ldap v0.0.0-20201202150706-ee0e6284187d // indirect
 github.com/mattermost/logr/v2 v2.0.15 // indirect
 github.com/mattn/go-colorable v0.1.13 // indirect
-github.com/mattn/go-isatty v0.0.19 // indirect
+github.com/mattn/go-isatty v0.0.20 // indirect
 github.com/mattn/go-runewidth v0.0.13 // indirect
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
 github.com/minio/md5-simd v1.1.2 // indirect
@@ -98,37 +98,43 @@ require (
 github.com/opentracing/opentracing-go v1.2.0 // indirect
 github.com/pborman/uuid v1.2.1 // indirect
 github.com/pelletier/go-toml v1.9.5 // indirect
-github.com/pelletier/go-toml/v2 v2.0.8 // indirect
+github.com/pelletier/go-toml/v2 v2.1.0 // indirect
 github.com/philhofer/fwd v1.1.1 // indirect
 github.com/pkg/errors v0.9.1 // indirect
-github.com/pmezard/go-difflib v1.0.0 // indirect
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
 github.com/rickb777/date v1.12.4 // indirect
 github.com/rickb777/plural v1.2.0 // indirect
 github.com/rivo/uniseg v0.2.0 // indirect
+github.com/sagikazarmark/locafero v0.4.0 // indirect
+github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 github.com/shazow/rateio v0.0.0-20200113175441-4461efc8bdc4 // indirect
 github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882 // indirect
 github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 // indirect
-github.com/spf13/afero v1.9.5 // indirect
-github.com/spf13/cast v1.5.1 // indirect
-github.com/spf13/jwalterweatherman v1.1.0 // indirect
+github.com/sourcegraph/conc v0.3.0 // indirect
+github.com/spf13/afero v1.11.0 // indirect
+github.com/spf13/cast v1.6.0 // indirect
 github.com/spf13/pflag v1.0.5 // indirect
-github.com/subosito/gotenv v1.4.2 // indirect
+github.com/subosito/gotenv v1.6.0 // indirect
 github.com/tinylib/msgp v1.1.6 // indirect
 github.com/valyala/bytebufferpool v1.0.0 // indirect
 github.com/valyala/fasttemplate v1.2.2 // indirect
-github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
+github.com/vmihailenco/msgpack/v5 v5.4.0 // indirect
 github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
 github.com/wiggin77/merror v1.0.3 // indirect
 github.com/wiggin77/srslog v1.0.1 // indirect
 go.mau.fi/libsignal v0.1.0 // indirect
-golang.org/x/crypto v0.12.0 // indirect
-golang.org/x/mod v0.8.0 // indirect
-golang.org/x/net v0.14.0 // indirect
-golang.org/x/sys v0.11.0 // indirect
-golang.org/x/term v0.11.0 // indirect
-golang.org/x/time v0.3.0 // indirect
-golang.org/x/tools v0.6.0 // indirect
+go.mau.fi/util v0.2.0 // indirect
+go.uber.org/atomic v1.9.0 // indirect
+go.uber.org/multierr v1.9.0 // indirect
+golang.org/x/crypto v0.17.0 // indirect
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
+golang.org/x/mod v0.12.0 // indirect
+golang.org/x/net v0.19.0 // indirect
+golang.org/x/sys v0.15.0 // indirect
+golang.org/x/term v0.15.0 // indirect
+golang.org/x/time v0.5.0 // indirect
+golang.org/x/tools v0.13.0 // indirect
 google.golang.org/appengine v1.6.7 // indirect
 gopkg.in/ini.v1 v1.67.0 // indirect
 gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
@@ -137,9 +143,9 @@ require (
 lukechampine.com/uint128 v1.2.0 // indirect
 modernc.org/cc/v3 v3.40.0 // indirect
 modernc.org/ccgo/v3 v3.16.13 // indirect
-modernc.org/libc v1.24.1 // indirect
+modernc.org/libc v1.29.0 // indirect
-modernc.org/mathutil v1.5.0 // indirect
+modernc.org/mathutil v1.6.0 // indirect
-modernc.org/memory v1.6.0 // indirect
+modernc.org/memory v1.7.2 // indirect
 modernc.org/opt v0.1.3 // indirect
 modernc.org/strutil v1.1.3 // indirect
 modernc.org/token v1.0.1 // indirect
go.sum: 176 changes
@@ -6,7 +6,6 @@ cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgo
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
 cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
 cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
 cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
 cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -19,7 +18,6 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
 cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
@@ -46,7 +44,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
 code.sajari.com/docconv v1.2.0/go.mod h1:r8yfCP6OKbZ9Xkd87aBa4nfpk6ud/PoyLwex3n6cXSc=
 dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -127,8 +124,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
 github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c h1:4mIZQXKYBymQ9coA82nNyG/CjicMNLBZ8cPVrhNUM3g=
 github.com/Rhymen/go-whatsapp v0.1.2-0.20211102134409-31a2e740845c/go.mod h1:DNSFRLFDFIqm2+0aJzSOVfn25020vldM4SRqz6YtLgI=
 github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
-github.com/SevereCloud/vksdk/v2 v2.16.0 h1:DQ90qqwY/yF1X/SWZQs1kQ/Ik+tphK82d+S6Rch46wQ=
+github.com/SevereCloud/vksdk/v2 v2.16.1 h1:UiazL3vTy7lMm33oIXRMxXg8S5I8bQuqEdLtbmOSpG4=
-github.com/SevereCloud/vksdk/v2 v2.16.0/go.mod h1:VN6BH9nFUXcP7Uf0uX74Aht2DQ7+139aG3/Og+jia4w=
+github.com/SevereCloud/vksdk/v2 v2.16.1/go.mod h1:UfVcBt8qh5+gIflQO6L+CWwrXcpwhOl5hKvKf8sXUd8=
 github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/advancedlogic/GoOse v0.0.0-20191112112754-e742535969c1/go.mod h1:f3HCSN1fBWjcpGtXyM119MJgeQl838v6so/PQOqvE1w=
@@ -399,8 +396,9 @@ github.com/d5/tengo/v2 v2.16.1 h1:/N6dqiGu9toqANInZEOQMM8I06icdZnmb+81DG/lZdw=
 github.com/d5/tengo/v2 v2.16.1/go.mod h1:XRGjEs5I9jYIKTxly6HCF8oiiilk5E/RYXOZ5b0DZC8=
 github.com/dave/jennifer v1.4.1/go.mod h1:7jEdnm+qBcxl8PC0zyp7vxcpSRnzXSt9r39tpTVGlwA=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dchote/go-openal v0.0.0-20171116030048-f4a9a141d372/go.mod h1:74z+CYu2/mx4N+mcIS/rsvfAxBPBV9uv8zRAnwyFkdI=
 github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
@@ -467,12 +465,12 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu
 github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
 github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
-github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
-github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
 github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
 github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
@@ -625,8 +623,8 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
 github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/gomarkdown/markdown v0.0.0-20230716120725-531d2d74bc12 h1:uK3X/2mt4tbSGoHvbLBHUny7CKiuwUip3MArtukol4E=
+github.com/gomarkdown/markdown v0.0.0-20231222211730-1d6d20845b47 h1:k4Tw0nt6lwro3Uin8eqoET7MDA4JnT8YgbCjc/g5E3k=
-github.com/gomarkdown/markdown v0.0.0-20230716120725-531d2d74bc12/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA=
+github.com/gomarkdown/markdown v0.0.0-20231222211730-1d6d20845b47/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA=
 github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
 github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -667,7 +665,6 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
@@ -680,14 +677,14 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
 github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
 github.com/gopackage/ddp v0.0.3 h1:fd0DxScoiS+ogq22ktey6DjDSDybtJPAn69geMpUtFc=
 github.com/gopackage/ddp v0.0.3/go.mod h1:3hUXYG6C/6JsoxKsQaK7st09+GP9RZBFPzyAlU/0SLg=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -700,8 +697,9 @@ github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc=
 github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU=
+github.com/gorilla/schema v1.2.1 h1:tjDxcmdb+siIqkTNoV+qRH2mjYdr2hHe5MKXbp61ziM=
+github.com/gorilla/schema v1.2.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@@ -895,8 +893,8 @@ github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
+github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
-github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
@@ -928,11 +926,11 @@ github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq
 github.com/kyokomi/emoji/v2 v2.2.12 h1:sSVA5nH9ebR3Zji1o31wu3yOwD1zKXQA2z0zUyeit60=
 github.com/kyokomi/emoji/v2 v2.2.12/go.mod h1:JUcn42DTdsXJo1SWanHh4HKDEyPaR5CqkmoirZZP9qE=
 github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
-github.com/labstack/echo/v4 v4.11.1 h1:dEpLU2FLg4UVmvCGPuk/APjlH6GDpbEPti61srUUUs4=
+github.com/labstack/echo/v4 v4.11.4 h1:vDZmA+qNeh1pd/cCkEicDMrjtrnMGQ1QFI9gWN1zGq8=
-github.com/labstack/echo/v4 v4.11.1/go.mod h1:YuYRTSM3CHs2ybfrL8Px48bO6BAnYIN4l8wSTMP6BDQ=
+github.com/labstack/echo/v4 v4.11.4/go.mod h1:noh7EvLwqDsmh/X/HWKPUl1AjzJrhyptRyEbQJfxen8=
 github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
-github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8=
+github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0=
-github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
+github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU=
 github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
 github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
 github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
@@ -945,8 +943,8 @@ github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lrstanley/girc v0.0.0-20230729130341-dd5853a5f1a6 h1:InzqHdpTHFBhVtlu9NB7Ot2p6Y4A9IcTjWBihkKE1JM=
+github.com/lrstanley/girc v0.0.0-20230911164840-f47717952bf9 h1:Kgp9FtxM8VZr2wDmXhCkd/f2EW5NeXJzZSWMYQB4M4s=
-github.com/lrstanley/girc v0.0.0-20230729130341-dd5853a5f1a6/go.mod h1:lgrnhcF8bg/Bd5HA5DOb4Z+uGqUqGnp4skr+J2GwVgI=
+github.com/lrstanley/girc v0.0.0-20230911164840-f47717952bf9/go.mod h1:lgrnhcF8bg/Bd5HA5DOb4Z+uGqUqGnp4skr+J2GwVgI=
 github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -1010,8 +1008,8 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -1199,8 +1197,8 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
 github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
 github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
 github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
+github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
-github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
+github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ=
 github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
@@ -1220,9 +1218,9 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
 github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -1308,6 +1306,10 @@ github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
 github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d h1:hrujxIzL1woJ7AwssoOcM/tq5JjjG2yYOc8odClEiXA=
 github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
@@ -1366,8 +1368,8 @@ github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882 h1:A7o8tOE
 github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882/go.mod h1:5IwJoz9Pw7JsrCN4/skkxUtSWT7myuUPLhCgv6Q5vvQ=
 github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9 h1:lpEzuenPuO1XNTeikEmvqYFcU37GVLl8SRNblzyvGBE=
 github.com/skip2/go-qrcode v0.0.0-20190110000554-dc11ecdae0a9/go.mod h1:PLPIyL7ikehBD1OAjmKKiOEhbvWyHGaNDjquXMcYABo=
-github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ=
+github.com/slack-go/slack v0.12.3 h1:92/dfFU8Q5XP6Wp5rr5/T5JHLM5c5Smtn53fhToAP88=
-github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
+github.com/slack-go/slack v0.12.3/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
 github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
@@ -1377,17 +1379,19 @@ github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4S
 github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
 github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
-github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
-github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
 github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
@@ -1396,7 +1400,6 @@ github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSW
 github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
 github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -1408,8 +1411,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
 github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
 github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
+github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
-github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
+github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
 github.com/splitio/go-client/v6 v6.1.0/go.mod h1:CEGAEFT99Fwb32ZIRcnZoXTMXddtB6IIpTmt3RP8mnM=
 github.com/splitio/go-split-commons/v3 v3.1.0/go.mod h1:29NCy20oAS4ZMy4qkwTd6277eieVDonx4V/aeDU/wUQ=
 github.com/splitio/go-toolkit/v4 v4.2.0/go.mod h1:EdIHN0yzB1GTXDYQc0KdKvnjkO/jfUM2YqHVYfhD3Wo=
@@ -1431,12 +1434,11 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
-github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@ -1492,8 +1494,9 @@ github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:tw
|
|||||||
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
|
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
|
||||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||||
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
|
|
||||||
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
|
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
|
||||||
|
github.com/vmihailenco/msgpack/v5 v5.4.0 h1:hRM0digJwyR6vll33NNAwCFguy5JuBD6jxDmQP3l608=
|
||||||
|
github.com/vmihailenco/msgpack/v5 v5.4.0/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
|
||||||
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
|
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
|
||||||
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
||||||
github.com/wiggin77/merror v1.0.2/go.mod h1:uQTcIU0Z6jRK4OwqganPYerzQxSFJ4GSHM3aurxxQpg=
|
github.com/wiggin77/merror v1.0.2/go.mod h1:uQTcIU0Z6jRK4OwqganPYerzQxSFJ4GSHM3aurxxQpg=
|
||||||
@ -1533,7 +1536,6 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
|
|||||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
github.com/yuin/goldmark v1.4.11/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
|
github.com/yuin/goldmark v1.4.11/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
|
||||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
||||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
||||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
||||||
@@ -1551,8 +1553,10 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3
 go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
 go.mau.fi/libsignal v0.1.0 h1:vAKI/nJ5tMhdzke4cTK1fb0idJzz1JuEIpmjprueC+c=
 go.mau.fi/libsignal v0.1.0/go.mod h1:R8ovrTezxtUNzCQE5PH30StOQWWeBskBsWE55vMfY9I=
-go.mau.fi/whatsmeow v0.0.0-20230805111647-405414b9b5c0 h1:6kAOyrp8E9p99X1I3uj7BtEFspdcVjnYzUZpqcHo/mE=
-go.mau.fi/whatsmeow v0.0.0-20230805111647-405414b9b5c0/go.mod h1:+ObGpFE6cbbY4hKc1FmQH9MVfqaemmlXGXSnwDvCOyE=
+go.mau.fi/util v0.2.0 h1:AMGBEdg9Ya/smb/09dljo9wBwKr432EpfjDWF7aFQg0=
+go.mau.fi/util v0.2.0/go.mod h1:AxuJUMCxpzgJ5eV9JbPWKRH8aAJJidxetNdUj7qcb84=
+go.mau.fi/whatsmeow v0.0.0-20240106083317-757651400d8d h1:0BBgOKeVF2wewSiPnMSJP9tf6jswntwNg5LQQ9P2X8c=
+go.mau.fi/whatsmeow v0.0.0-20240106083317-757651400d8d/go.mod h1:5xTtHNaZpGni6z6aE1iEopjW7wNgsKcolZxZrOujK9M=
 go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
 go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
@@ -1569,10 +1573,13 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
+go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
 go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@@ -1613,9 +1620,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
-golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -1631,6 +1637,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||||
golang.org/x/exp v0.0.0-20200908183739-ae8ad444f925/go.mod h1:1phAWC201xIgDyaFpmDeZkgf70Q4Pd/CNqfRtVPtxNw=
|
golang.org/x/exp v0.0.0-20200908183739-ae8ad444f925/go.mod h1:1phAWC201xIgDyaFpmDeZkgf70Q4Pd/CNqfRtVPtxNw=
|
||||||
|
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
|
||||||
|
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
|
||||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
@ -1642,8 +1650,8 @@ golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+o
|
|||||||
golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/image v0.0.0-20220321031419-a8550c1d254a/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
|
golang.org/x/image v0.0.0-20220321031419-a8550c1d254a/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
|
||||||
golang.org/x/image v0.11.0 h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo=
|
golang.org/x/image v0.15.0 h1:kOELfmgrmJlw4Cdb7g/QGuB3CvDrXbqEIww/pNtNBm8=
|
||||||
golang.org/x/image v0.11.0/go.mod h1:bglhjqbqVuEb9e9+eNR45Jfu7D+T4Qan+NhQk8Ck2P8=
|
golang.org/x/image v0.15.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
|
||||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
@ -1670,9 +1678,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
|
||||||
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
|
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
|
||||||
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
@ -1749,10 +1756,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
|
||||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
|
||||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
|
||||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
@ -1772,8 +1777,8 @@ golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ
|
|||||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||||
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
|
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
|
||||||
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
|
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
|
||||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@ -1787,9 +1792,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
|
||||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@ -1883,7 +1886,6 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
@ -1892,7 +1894,6 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
@ -1908,26 +1909,20 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220403205710-6acee93ad0eb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220403205710-6acee93ad0eb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
|
||||||
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
|
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
||||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
|
||||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
@ -1937,17 +1932,16 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
|
||||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
@ -2023,7 +2017,6 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f
|
|||||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
|
||||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
@ -2032,9 +2025,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
|||||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
|
||||||
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
|
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
@ -2134,10 +2126,8 @@ google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6D
|
|||||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
@@ -2413,20 +2403,20 @@ modernc.org/libc v1.11.98/go.mod h1:ynK5sbjsU77AP+nn61+k+wxUGRx9rOFcIqWYYMaDZ4c=
 modernc.org/libc v1.11.99/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI=
 modernc.org/libc v1.11.101/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI=
 modernc.org/libc v1.11.104/go.mod h1:2MH3DaF/gCU8i/UBiVE1VFRos4o523M7zipmwH8SIgQ=
-modernc.org/libc v1.24.1 h1:uvJSeCKL/AgzBo2yYIPPTy82v21KgGnizcGYfBHaNuM=
-modernc.org/libc v1.24.1/go.mod h1:FmfO1RLrU3MHJfyi9eYYmZBfi/R+tqZ6+hQ3yQQUkak=
+modernc.org/libc v1.29.0 h1:tTFRFq69YKCF2QyGNuRUQxKBm1uZZLubf6Cjh/pVHXs=
+modernc.org/libc v1.29.0/go.mod h1:DaG/4Q3LRRdqpiLyP0C2m1B8ZMGkQ+cCgOIjEtQlYhQ=
 modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8=
 modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
 modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
 modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
 modernc.org/mathutil v1.4.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
 modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
-modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
-modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
+modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
 modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc=
 modernc.org/memory v1.0.5/go.mod h1:B7OYswTRnfGg+4tDH1t1OeUNnsy2viGTdME4tzd+IjM=
-modernc.org/memory v1.6.0 h1:i6mzavxrE9a30whzMfwf7XWVODx2r5OYXvU46cirX7o=
-modernc.org/memory v1.6.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
+modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
 modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
 modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
 modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
@@ -2434,8 +2424,8 @@ modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY=
 modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k=
 modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs=
 modernc.org/sqlite v1.14.3/go.mod h1:xMpicS1i2MJ4C8+Ap0vYBqTwYfpFvdnPE6brbFOtV2Y=
-modernc.org/sqlite v1.25.0 h1:AFweiwPNd/b3BoKnBOfFm+Y260guGMF+0UFk0savqeA=
-modernc.org/sqlite v1.25.0/go.mod h1:FL3pVXie73rg3Rii6V/u5BoHlSoyeZeIgKZEgHARyCU=
+modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ=
+modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
 modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
 modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
 modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
2 vendor/github.com/SevereCloud/vksdk/v2/doc.go generated vendored
@@ -7,6 +7,6 @@ package vksdk

 // Module constants.
 const (
-	Version = "2.16.0"
+	Version = "2.16.1"
 	API = "5.131"
 )
2 vendor/github.com/SevereCloud/vksdk/v2/object/board.go generated vendored
@@ -36,5 +36,5 @@ type BoardTopicPoll struct {
 	OwnerID int `json:"owner_id"` // Poll owner's ID
 	PollID int `json:"poll_id"` // Poll ID
 	Question string `json:"question"` // Poll question
-	Votes string `json:"votes"` // Votes number
+	Votes int `json:"votes"` // Votes number
 }
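The `votes` change above means the field now decodes as a JSON number rather than a string. A minimal sketch of what that looks like for callers, assuming only the struct shape shown in the diff and an invented payload:

```go
// Minimal sketch (not from the vksdk repo): decoding a board topic poll with
// the numeric "votes" field. The sample JSON payload is invented.
package main

import (
	"encoding/json"
	"fmt"
)

// BoardTopicPoll mirrors the struct shown in the diff above.
type BoardTopicPoll struct {
	OwnerID  int    `json:"owner_id"`
	PollID   int    `json:"poll_id"`
	Question string `json:"question"`
	Votes    int    `json:"votes"` // previously string; now decodes a JSON number
}

func main() {
	raw := []byte(`{"owner_id":1,"poll_id":2,"question":"Snack?","votes":42}`)

	var p BoardTopicPoll
	if err := json.Unmarshal(raw, &p); err != nil {
		// With the old `Votes string` field this numeric payload would not decode.
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(p.Question, p.Votes) // Snack? 42
}
```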
4 vendor/github.com/SevereCloud/vksdk/v2/object/object.go generated vendored
@@ -285,8 +285,8 @@ type BaseLinkProduct struct {

 // BaseLinkRating struct.
 type BaseLinkRating struct {
-	ReviewsCount int `json:"reviews_count"`
+	ReviewsCount json.Number `json:"reviews_count"`
 	Stars float64 `json:"stars"`
 }

 // BasePlace struct.
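With `reviews_count` switched to `json.Number`, the raw literal is kept and converted only when needed; the standard decoder also appears to tolerate the value arriving as a quoted numeric string, which is presumably the point of the change. A minimal sketch with invented payloads, assuming that behavior of `encoding/json`:

```go
// Minimal sketch (not from the vksdk repo): json.Number keeps the raw literal
// and can be converted with Int64/Float64 on demand.
package main

import (
	"encoding/json"
	"fmt"
)

type BaseLinkRating struct {
	ReviewsCount json.Number `json:"reviews_count"`
	Stars        float64     `json:"stars"`
}

func main() {
	for _, raw := range []string{
		`{"reviews_count": 10, "stars": 4.5}`,   // numeric form
		`{"reviews_count": "10", "stars": 4.5}`, // quoted form sometimes seen from APIs
	} {
		var r BaseLinkRating
		if err := json.Unmarshal([]byte(raw), &r); err != nil {
			fmt.Println("decode error:", err)
			continue
		}
		n, _ := r.ReviewsCount.Int64() // convert when an integer is needed
		fmt.Println(n, r.Stars)
	}
}
```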
13 vendor/github.com/fsnotify/fsnotify/.cirrus.yml generated vendored Normal file
@@ -0,0 +1,13 @@
+freebsd_task:
+  name: 'FreeBSD'
+  freebsd_instance:
+    image_family: freebsd-13-2
+  install_script:
+    - pkg update -f
+    - pkg install -y go
+  test_script:
+    # run tests as user "cirrus" instead of root
+    - pw useradd cirrus -m
+    - chown -R cirrus:cirrus .
+    - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
+    - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
1 vendor/github.com/fsnotify/fsnotify/.gitignore generated vendored
@@ -4,3 +4,4 @@

 # Output of go build ./cmd/fsnotify
 /fsnotify
+/fsnotify.exe
87 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md generated vendored
@@ -1,16 +1,87 @@
 # Changelog

-All notable changes to this project will be documented in this file.
+Unreleased
+----------
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [Unreleased]
-
 Nothing yet.

-## [1.6.0] - 2022-10-13
+1.7.0 - 2023-10-22
+------------------
+This version of fsnotify needs Go 1.17.
+
+### Additions
+
+- illumos: add FEN backend to support illumos and Solaris. ([#371])
+
+- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
+  in cases where you can't control the kernel buffer and receive a large number
+  of events in bursts. ([#550], [#572])
+
+- all: add `AddWith()`, which is identical to `Add()` but allows passing
+  options. ([#521])
+
+- windows: allow setting the ReadDirectoryChangesW() buffer size with
+  `fsnotify.WithBufferSize()`; the default of 64K is the highest value that
+  works on all platforms and is enough for most purposes, but in some cases a
+  highest buffer is needed. ([#521])
+
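The additions above are easiest to see together. A short sketch of how they combine, assuming the 1.7.0 signatures as documented upstream; the watched path is a placeholder:

```go
// Sketch of the new fsnotify 1.7.0 APIs mentioned above; error handling is
// kept minimal and the path is a placeholder.
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewBufferedWatcher gives the Events channel a buffer (here: 50 events)
	// for bursty workloads; NewWatcher keeps it unbuffered.
	w, err := fsnotify.NewBufferedWatcher(50)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// AddWith is Add plus options; WithBufferSize sets the Windows
	// ReadDirectoryChangesW buffer and is only honored on Windows.
	if err := w.AddWith("/tmp/watched", fsnotify.WithBufferSize(128*1024)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			log.Println("event:", ev)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```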
+### Changes and fixes
+
+- inotify: remove watcher if a watched path is renamed ([#518])
+
+  After a rename the reported name wasn't updated, or even an empty string.
+  Inotify doesn't provide any good facilities to update it, so just remove the
+  watcher. This is already how it worked on kqueue and FEN.
+
+  On Windows this does work, and remains working.
+
+- windows: don't listen for file attribute changes ([#520])
+
+  File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
+  with no way to see if they're a file write or attribute change, so would show
+  up as a fsnotify.Write event. This is never useful, and could result in many
+  spurious Write events.
+
+- windows: return `ErrEventOverflow` if the buffer is full ([#525])
+
+  Before it would merely return "short read", making it hard to detect this
+  error.
+
+- kqueue: make sure events for all files are delivered properly when removing a
+  watched directory ([#526])
+
+  Previously they would get sent with `""` (empty string) or `"."` as the path
+  name.
+
+- kqueue: don't emit spurious Create events for symbolic links ([#524])
+
+  The link would get resolved but kqueue would "forget" it already saw the link
+  itself, resulting on a Create for every Write event for the directory.
+
+- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
+
+- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
+  `backend_other.go`, making it easier to use on unsupported platforms such as
+  WASM, AIX, etc. ([#528])
+
+- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
+  Google AppEngine forbids usage of the unsafe package so the inotify backend
+  won't compile there.
+
+[#371]: https://github.com/fsnotify/fsnotify/pull/371
+[#516]: https://github.com/fsnotify/fsnotify/pull/516
+[#518]: https://github.com/fsnotify/fsnotify/pull/518
+[#520]: https://github.com/fsnotify/fsnotify/pull/520
+[#521]: https://github.com/fsnotify/fsnotify/pull/521
+[#524]: https://github.com/fsnotify/fsnotify/pull/524
+[#525]: https://github.com/fsnotify/fsnotify/pull/525
+[#526]: https://github.com/fsnotify/fsnotify/pull/526
+[#528]: https://github.com/fsnotify/fsnotify/pull/528
+[#537]: https://github.com/fsnotify/fsnotify/pull/537
+[#550]: https://github.com/fsnotify/fsnotify/pull/550
+[#572]: https://github.com/fsnotify/fsnotify/pull/572
+
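Because a full buffer now surfaces as `ErrEventOverflow` on the Errors channel, callers can check for it explicitly. A small sketch, assuming a hypothetical `watchutil` helper package and a caller-supplied `resync` callback:

```go
package watchutil

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

// logErrors drains the Errors channel and calls resync when the kernel or
// ReadDirectoryChangesW buffer overflowed and events may have been dropped.
// resync is a placeholder for application-specific recovery (e.g. a rescan).
func logErrors(w *fsnotify.Watcher, resync func()) {
	for err := range w.Errors {
		if errors.Is(err, fsnotify.ErrEventOverflow) {
			log.Println("event buffer overflowed; some events were dropped")
			resync()
			continue
		}
		log.Println("watch error:", err)
	}
}
```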
+1.6.0 - 2022-10-13
+------------------
 This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
 but not documented). It also increases the minimum Linux version to 2.6.32.

79 vendor/github.com/fsnotify/fsnotify/README.md generated vendored
@@ -1,29 +1,31 @@
 fsnotify is a Go library to provide cross-platform filesystem notifications on
-Windows, Linux, macOS, and BSD systems.
+Windows, Linux, macOS, BSD, and illumos.

-Go 1.16 or newer is required; the full documentation is at
+Go 1.17 or newer is required; the full documentation is at
 https://pkg.go.dev/github.com/fsnotify/fsnotify

-**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
-released version, whereas this README is for the last development version which
-may include additions/changes.**
-
 ---

 Platform support:

-| Adapter | OS | Status |
+| Backend | OS | Status |
-| --------------------- | ---------------| -------------------------------------------------------------|
+| :-------------------- | :--------- | :------------------------------------------------------------------------ |
-| inotify | Linux 2.6.32+ | Supported |
+| inotify | Linux | Supported |
 | kqueue | BSD, macOS | Supported |
 | ReadDirectoryChangesW | Windows | Supported |
-| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
+| FEN | illumos | Supported |
-| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
+| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
-| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
+| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment |
-| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
+| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
-| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
+| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
+| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |

-Linux and macOS should include Android and iOS, but these are currently untested.
+Linux and illumos should include Android and Solaris, but these are currently
+untested.

+[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
+[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
+[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129
+
 Usage
 -----
@@ -83,20 +85,23 @@ run with:

 % go run ./cmd/fsnotify

+Further detailed documentation can be found in godoc:
+https://pkg.go.dev/github.com/fsnotify/fsnotify
+
 FAQ
 ---
 ### Will a file still be watched when it's moved to another directory?
 No, not unless you are watching the location it was moved to.

-### Are subdirectories watched too?
+### Are subdirectories watched?
 No, you must add watches for any directory you want to watch (a recursive
 watcher is on the roadmap: [#18]).

 [#18]: https://github.com/fsnotify/fsnotify/issues/18

 ### Do I have to watch the Error and Event channels in a goroutine?
-As of now, yes (you can read both channels in the same goroutine using `select`,
-you don't need a separate goroutine for both channels; see the example).
+Yes. You can read both channels in the same goroutine using `select` (you don't
+need a separate goroutine for both channels; see the example).

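A minimal sketch of the single-goroutine pattern that answer describes, with a placeholder path:

```go
// Minimal sketch: both channels serviced from one goroutine via select.
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp/watched"); err != nil {
		log.Fatal(err)
	}

	// One goroutine is enough: select services Events and Errors together.
	for {
		select {
		case ev, ok := <-w.Events:
			if !ok { // channel closed after w.Close()
				return
			}
			log.Println("event:", ev.Op, ev.Name)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```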
 ### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
 fsnotify requires support from underlying OS to work. The current NFS and SMB
@@ -107,6 +112,32 @@ This could be fixed with a polling watcher ([#9]), but it's not yet implemented.

 [#9]: https://github.com/fsnotify/fsnotify/issues/9

+### Why do I get many Chmod events?
+Some programs may generate a lot of attribute changes; for example Spotlight on
+macOS, anti-virus programs, backup applications, and some others are known to do
+this. As a rule, it's typically best to ignore Chmod events. They're often not
+useful, and tend to cause problems.
+
+Spotlight indexing on macOS can result in multiple events (see [#15]). A
+temporary workaround is to add your folder(s) to the *Spotlight Privacy
+settings* until we have a native FSEvents implementation (see [#11]).
+
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#15]: https://github.com/fsnotify/fsnotify/issues/15
+
### Watching a file doesn't work well
|
||||||
|
Watching individual files (rather than directories) is generally not recommended
|
||||||
|
as many programs (especially editors) update files atomically: it will write to
|
||||||
|
a temporary file which is then moved to to destination, overwriting the original
|
||||||
|
(or some variant thereof). The watcher on the original file is now lost, as that
|
||||||
|
no longer exists.
|
||||||
|
|
||||||
|
The upshot of this is that a power failure or crash won't leave a half-written
|
||||||
|
file.
|
||||||
|
|
||||||
|
Watch the parent directory and use `Event.Name` to filter out files you're not
|
||||||
|
interested in. There is an example of this in `cmd/fsnotify/file.go`.
|
||||||
|
|
||||||
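A minimal sketch of that parent-directory pattern, assuming a watcher `w` created as in the earlier example (with `log` and `path/filepath` imported) and a hypothetical file `config.yaml`:

```go
dir := "/etc/myapp" // hypothetical directory
target := filepath.Join(dir, "config.yaml")

if err := w.Add(dir); err != nil { // watch the directory, not the file
	log.Fatal(err)
}

for ev := range w.Events {
	if ev.Name != target {
		continue // event for some other file in the directory
	}
	// Created, written, renamed or removed: re-open or re-read the file here.
	log.Println("config changed:", ev.Op)
}
```

In real code the Errors channel should be drained as well, as the FAQ above notes.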
 Platform-specific notes
 -----------------------
 ### Linux
@@ -151,11 +182,3 @@ these platforms.

 The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
 control the maximum number of open files.
-
-### macOS
-Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
-workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
-have a native FSEvents implementation (see [#11]).
-
-[#11]: https://github.com/fsnotify/fsnotify/issues/11
-[#15]: https://github.com/fsnotify/fsnotify/issues/15
vendor/github.com/fsnotify/fsnotify/backend_fen.go | 550 (generated, vendored)

@@ -1,10 +1,19 @@
 //go:build solaris
 // +build solaris
+
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh

 package fsnotify

 import (
 	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"golang.org/x/sys/unix"
 )

 // Watcher watches a set of paths, delivering events on a channel.

(The context-only blocks @@ -17,9 +26,9 @@ and @@ -33,16 +42,16 @@ show the unchanged part of the generated Watcher doc comment.)

@@ -58,14 +67,20 @@ import (
 // control the maximum number of open files, as well as /etc/login.conf on BSD
 // systems.
 //
-// # macOS notes
+// # Windows notes
 //
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
 //
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
 type Watcher struct {
 	// Events sends the filesystem change events.

@@ -92,44 +107,129 @@ type Watcher struct {
 	//   initiated by the user may show up as one or multiple
 	//   writes, depending on when the system syncs things to
 	//   disk. For example when compiling a large Go program
-	//   you may get hundreds of Write events, so you
-	//   probably want to wait until you've stopped receiving
-	//   them (see the dedup example in cmd/fsnotify).
+	//   you may get hundreds of Write events, and you may
+	//   want to wait until you've stopped receiving them
+	//   (see the dedup example in cmd/fsnotify).
+	//
+	//   Some systems may send Write event for directories
+	//   when the directory content changes.
 	//
 	//   fsnotify.Chmod  Attributes were changed. On Linux this is also sent
 	//   when a file is removed (or more accurately, when a
 	//   link to an inode is removed). On kqueue it's sent
-	//   and on kqueue when a file is truncated. On Windows
-	//   it's never sent.
+	//   when a file is truncated. On Windows it's never
+	//   sent.
 	Events chan Event

 	// Errors sends any errors.
+	//
+	// ErrEventOverflow is used to indicate there are too many events:
+	//
+	//  - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+	//  - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+	//  - kqueue, fen: Not used.
 	Errors chan error
+
+	mu      sync.Mutex
+	port    *unix.EventPort
+	done    chan struct{}       // Channel for sending a "quit message" to the reader goroutine
+	dirs    map[string]struct{} // Explicitly watched directories
+	watches map[string]struct{} // Explicitly watched non-directories
 }

 // NewWatcher creates a new Watcher.
 func NewWatcher() (*Watcher, error) {
-	return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+	return NewBufferedWatcher(0)
 }

-// Close removes all watches and closes the events channel.
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
+	w := &Watcher{
+		Events:  make(chan Event, sz),
+		Errors:  make(chan error),
+		dirs:    make(map[string]struct{}),
+		watches: make(map[string]struct{}),
+		done:    make(chan struct{}),
+	}
+
+	var err error
+	w.port, err = unix.NewEventPort()
+	if err != nil {
+		return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+// sendEvent attempts to send an event to the user, returning true if the event
+// was put in the channel successfully and false if the watcher has been closed.
+func (w *Watcher) sendEvent(name string, op Op) (sent bool) {
+	select {
+	case w.Events <- Event{Name: name, Op: op}:
+		return true
+	case <-w.done:
+		return false
+	}
+}
+
+// sendError attempts to send an error to the user, returning true if the error
+// was put in the channel successfully and false if the watcher has been closed.
+func (w *Watcher) sendError(err error) (sent bool) {
+	select {
+	case w.Errors <- err:
+		return true
+	case <-w.done:
+		return false
+	}
+}
+
+func (w *Watcher) isClosed() bool {
+	select {
+	case <-w.done:
+		return true
+	default:
+		return false
+	}
+}
+
+// Close removes all watches and closes the Events channel.
 func (w *Watcher) Close() error {
-	return nil
+	// Take the lock used by associateFile to prevent lingering events from
+	// being processed after the close
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if w.isClosed() {
+		return nil
+	}
+	close(w.done)
+	return w.port.Close()
 }

 // Add starts monitoring the path for changes.
 //
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
 //
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
 //
 // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
 // filesystems (/proc, /sys, etc.) generally don't work.
 //
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
 // # Watching directories
 //
 // All files in a directory are monitored, including new files that are created

@@ -139,15 +239,63 @@ func (w *Watcher) Close() error {
 // # Watching files
 //
 // Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
 //
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+//     other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+	if w.isClosed() {
+		return ErrClosed
+	}
+	if w.port.PathIsWatched(name) {
+		return nil
+	}
+
+	_ = getOptions(opts...)
+
+	// Currently we resolve symlinks that were explicitly requested to be
+	// watched. Otherwise we would use LStat here.
+	stat, err := os.Stat(name)
+	if err != nil {
+		return err
+	}
+
+	// Associate all files in the directory.
+	if stat.IsDir() {
+		err := w.handleDirectory(name, stat, true, w.associateFile)
+		if err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.dirs[name] = struct{}{}
+		w.mu.Unlock()
+		return nil
+	}
+
+	err = w.associateFile(name, stat, true)
+	if err != nil {
+		return err
+	}
+
+	w.mu.Lock()
+	w.watches[name] = struct{}{}
+	w.mu.Unlock()
 	return nil
 }

@@ -157,6 +305,336 @@ func (w *Watcher) Add(name string) error {
 // /tmp/dir and /tmp/dir/subdir then you will need to remove both.
 //
 // Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
 func (w *Watcher) Remove(name string) error {
+	if w.isClosed() {
+		return nil
+	}
+	if !w.port.PathIsWatched(name) {
+		return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
+	}
+
+	// The user has expressed an intent. Immediately remove this name from
+	// whichever watch list it might be in. If it's not in there the delete
+	// doesn't cause harm.
+	w.mu.Lock()
+	delete(w.watches, name)
+	delete(w.dirs, name)
+	w.mu.Unlock()
+
+	stat, err := os.Stat(name)
+	if err != nil {
+		return err
+	}
+
+	// Remove associations for every file in the directory.
+	if stat.IsDir() {
+		err := w.handleDirectory(name, stat, false, w.dissociateFile)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	err = w.port.DissociatePath(name)
+	if err != nil {
+		return err
+	}
+
 	return nil
 }
+
+// readEvents contains the main loop that runs in a goroutine watching for events.
+func (w *Watcher) readEvents() {
+	// If this function returns, the watcher has been closed and we can close
+	// these channels
+	defer func() {
+		close(w.Errors)
+		close(w.Events)
+	}()
+
+	pevents := make([]unix.PortEvent, 8)
+	for {
+		count, err := w.port.Get(pevents, 1, nil)
+		if err != nil && err != unix.ETIME {
+			// Interrupted system call (count should be 0) ignore and continue
+			if errors.Is(err, unix.EINTR) && count == 0 {
+				continue
+			}
+			// Get failed because we called w.Close()
+			if errors.Is(err, unix.EBADF) && w.isClosed() {
+				return
+			}
+			// There was an error not caused by calling w.Close()
+			if !w.sendError(err) {
+				return
+			}
+		}
+
+		p := pevents[:count]
+		for _, pevent := range p {
+			if pevent.Source != unix.PORT_SOURCE_FILE {
+				// Event from unexpected source received; should never happen.
+				if !w.sendError(errors.New("Event from unexpected source received")) {
+					return
+				}
+				continue
+			}
+
+			err = w.handleEvent(&pevent)
+			if err != nil {
+				if !w.sendError(err) {
+					return
+				}
+			}
+		}
+	}
+}
+
+func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
+	files, err := os.ReadDir(path)
+	if err != nil {
+		return err
+	}
+
+	// Handle all children of the directory.
+	for _, entry := range files {
+		finfo, err := entry.Info()
+		if err != nil {
+			return err
+		}
+		err = handler(filepath.Join(path, finfo.Name()), finfo, false)
+		if err != nil {
+			return err
+		}
+	}
+
+	// And finally handle the directory itself.
+	return handler(path, stat, follow)
+}
+
+// handleEvent might need to emit more than one fsnotify event if the events
+// bitmap matches more than one event type (e.g. the file was both modified and
+// had the attributes changed between when the association was created and the
+// when event was returned)
+func (w *Watcher) handleEvent(event *unix.PortEvent) error {
+	var (
+		events     = event.Events
+		path       = event.Path
+		fmode      = event.Cookie.(os.FileMode)
+		reRegister = true
+	)
+
+	w.mu.Lock()
+	_, watchedDir := w.dirs[path]
+	_, watchedPath := w.watches[path]
+	w.mu.Unlock()
+	isWatched := watchedDir || watchedPath
+
+	if events&unix.FILE_DELETE != 0 {
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		reRegister = false
+	}
+	if events&unix.FILE_RENAME_FROM != 0 {
+		if !w.sendEvent(path, Rename) {
+			return nil
+		}
+		// Don't keep watching the new file name
+		reRegister = false
+	}
+	if events&unix.FILE_RENAME_TO != 0 {
+		// We don't report a Rename event for this case, because Rename events
+		// are interpreted as referring to the _old_ name of the file, and in
+		// this case the event would refer to the new name of the file. This
+		// type of rename event is not supported by fsnotify.
+
+		// inotify reports a Remove event in this case, so we simulate this
+		// here.
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		// Don't keep watching the file that was removed
+		reRegister = false
+	}
+
+	// The file is gone, nothing left to do.
+	if !reRegister {
+		if watchedDir {
+			w.mu.Lock()
+			delete(w.dirs, path)
+			w.mu.Unlock()
+		}
+		if watchedPath {
+			w.mu.Lock()
+			delete(w.watches, path)
+			w.mu.Unlock()
+		}
+		return nil
+	}
+
+	// If we didn't get a deletion the file still exists and we're going to have
+	// to watch it again. Let's Stat it now so that we can compare permissions
+	// and have what we need to continue watching the file
+
+	stat, err := os.Lstat(path)
+	if err != nil {
+		// This is unexpected, but we should still emit an event. This happens
+		// most often on "rm -r" of a subdirectory inside a watched directory We
+		// get a modify event of something happening inside, but by the time we
+		// get here, the sudirectory is already gone. Clearly we were watching
+		// this path but now it is gone. Let's tell the user that it was
+		// removed.
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		// Suppress extra write events on removed directories; they are not
+		// informative and can be confusing.
+		return nil
+	}
+
+	// resolve symlinks that were explicitly watched as we would have at Add()
+	// time. this helps suppress spurious Chmod events on watched symlinks
+	if isWatched {
+		stat, err = os.Stat(path)
+		if err != nil {
+			// The symlink still exists, but the target is gone. Report the
+			// Remove similar to above.
+			if !w.sendEvent(path, Remove) {
+				return nil
+			}
+			// Don't return the error
+		}
+	}
+
+	if events&unix.FILE_MODIFIED != 0 {
+		if fmode.IsDir() {
+			if watchedDir {
+				if err := w.updateDirectory(path); err != nil {
+					return err
+				}
+			} else {
+				if !w.sendEvent(path, Write) {
+					return nil
+				}
+			}
+		} else {
+			if !w.sendEvent(path, Write) {
+				return nil
+			}
+		}
+	}
+	if events&unix.FILE_ATTRIB != 0 && stat != nil {
+		// Only send Chmod if perms changed
+		if stat.Mode().Perm() != fmode.Perm() {
+			if !w.sendEvent(path, Chmod) {
+				return nil
+			}
+		}
+	}
+
+	if stat != nil {
+		// If we get here, it means we've hit an event above that requires us to
+		// continue watching the file or directory
+		return w.associateFile(path, stat, isWatched)
+	}
+	return nil
+}
+
+func (w *Watcher) updateDirectory(path string) error {
+	// The directory was modified, so we must find unwatched entities and watch
+	// them. If something was removed from the directory, nothing will happen,
+	// as everything else should still be watched.
+	files, err := os.ReadDir(path)
+	if err != nil {
+		return err
+	}
+
+	for _, entry := range files {
+		path := filepath.Join(path, entry.Name())
+		if w.port.PathIsWatched(path) {
+			continue
+		}
+
+		finfo, err := entry.Info()
+		if err != nil {
+			return err
+		}
+		err = w.associateFile(path, finfo, false)
+		if err != nil {
+			if !w.sendError(err) {
+				return nil
+			}
+		}
+		if !w.sendEvent(path, Create) {
+			return nil
+		}
+	}
+	return nil
+}
+
+func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error {
+	if w.isClosed() {
+		return ErrClosed
+	}
+	// This is primarily protecting the call to AssociatePath but it is
+	// important and intentional that the call to PathIsWatched is also
+	// protected by this mutex. Without this mutex, AssociatePath has been seen
+	// to error out that the path is already associated.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if w.port.PathIsWatched(path) {
+		// Remove the old association in favor of this one If we get ENOENT,
+		// then while the x/sys/unix wrapper still thought that this path was
+		// associated, the underlying event port did not. This call will have
+		// cleared up that discrepancy. The most likely cause is that the event
+		// has fired but we haven't processed it yet.
+		err := w.port.DissociatePath(path)
+		if err != nil && err != unix.ENOENT {
+			return err
+		}
+	}
+	// FILE_NOFOLLOW means we watch symlinks themselves rather than their
+	// targets.
+	events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW
+	if follow {
+		// We *DO* follow symlinks for explicitly watched entries.
+		events = unix.FILE_MODIFIED | unix.FILE_ATTRIB
+	}
+	return w.port.AssociatePath(path, stat,
+		events,
+		stat.Mode())
+}
+
+func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error {
+	if !w.port.PathIsWatched(path) {
+		return nil
+	}
+	return w.port.DissociatePath(path)
+}
+
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string {
+	if w.isClosed() {
+		return nil
+	}
+
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	entries := make([]string, 0, len(w.watches)+len(w.dirs))
+	for pathname := range w.dirs {
+		entries = append(entries, pathname)
+	}
+	for pathname := range w.watches {
+		entries = append(entries, pathname)
+	}
+
+	return entries
+}
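As the hunks above show, the FEN backend (like the other backends in this update) now implements NewWatcher by delegating to NewBufferedWatcher. A brief, hedged sketch of how a caller could opt into a buffered Events channel; the size 4096 is an arbitrary illustration, and the doc comment above still recommends an unbuffered watcher for almost all uses.

```go
// NewBufferedWatcher(0) is equivalent to NewWatcher(); a non-zero size only
// helps when events arrive in bursts faster than the loop can drain them.
w, err := fsnotify.NewBufferedWatcher(4096)
if err != nil {
	log.Fatal(err)
}
defer w.Close()
```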
vendor/github.com/fsnotify/fsnotify/backend_inotify.go | 381 (generated, vendored)

@@ -1,5 +1,8 @@
-//go:build linux
-// +build linux
+//go:build linux && !appengine
+// +build linux,!appengine
+
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh

 package fsnotify

(The hunks @@ -26,9 +29,9 @@, @@ -42,16 +45,16 @@ and @@ -67,14 +70,20 @@ carry the same generated Watcher doc-comment text and the same "# macOS notes" to "# Windows notes" change already shown for backend_fen.go; the opening of @@ -101,36 +110,148 @@ likewise repeats the Write/Chmod and Errors doc-comment changes.)

@@ -101,36 +110,148 @@ type Watcher struct {
 	// Store fd here as os.File.Read() will no longer return on close after
 	// calling Fd(). See: https://github.com/golang/go/issues/26439
 	fd          int
-	mu          sync.Mutex // Map access
 	inotifyFile *os.File
-	watches     map[string]*watch // Map of inotify watches (key: path)
-	paths       map[int]string    // Map of watched paths (key: watch descriptor)
-	done        chan struct{}     // Channel for sending a "quit message" to the reader goroutine
-	doneResp    chan struct{}     // Channel to respond to Close
+	watches     *watches
+	done        chan struct{} // Channel for sending a "quit message" to the reader goroutine
+	closeMu     sync.Mutex
+	doneResp    chan struct{} // Channel to respond to Close
 }

+type (
+	watches struct {
+		mu   sync.RWMutex
+		wd   map[uint32]*watch // wd → watch
+		path map[string]uint32 // pathname → wd
+	}
+	watch struct {
+		wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+		flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+		path  string // Watch path.
+	}
+)
+
+func newWatches() *watches {
+	return &watches{
+		wd:   make(map[uint32]*watch),
+		path: make(map[string]uint32),
+	}
+}
+
+func (w *watches) len() int {
+	w.mu.RLock()
+	defer w.mu.RUnlock()
+	return len(w.wd)
+}
+
+func (w *watches) add(ww *watch) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	w.wd[ww.wd] = ww
+	w.path[ww.path] = ww.wd
+}
+
+func (w *watches) remove(wd uint32) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	delete(w.path, w.wd[wd].path)
+	delete(w.wd, wd)
+}
+
+func (w *watches) removePath(path string) (uint32, bool) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	wd, ok := w.path[path]
+	if !ok {
+		return 0, false
+	}
+
+	delete(w.path, path)
+	delete(w.wd, wd)
+
+	return wd, true
+}
+
+func (w *watches) byPath(path string) *watch {
+	w.mu.RLock()
+	defer w.mu.RUnlock()
+	return w.wd[w.path[path]]
+}
+
+func (w *watches) byWd(wd uint32) *watch {
+	w.mu.RLock()
+	defer w.mu.RUnlock()
+	return w.wd[wd]
+}
+
+func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	var existing *watch
+	wd, ok := w.path[path]
+	if ok {
+		existing = w.wd[wd]
+	}
+
+	upd, err := f(existing)
+	if err != nil {
+		return err
+	}
+	if upd != nil {
+		w.wd[upd.wd] = upd
+		w.path[upd.path] = upd.wd
+
+		if upd.wd != wd {
+			delete(w.wd, wd)
+		}
+	}
+
+	return nil
+}

 // NewWatcher creates a new Watcher.
 func NewWatcher() (*Watcher, error) {
-	// Create inotify fd
-	// Need to set the FD to nonblocking mode in order for SetDeadline methods to work
-	// Otherwise, blocking i/o operations won't terminate on close
+	return NewBufferedWatcher(0)
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
+	// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
+	// I/O operations won't terminate on close.
 	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
 	if fd == -1 {
 		return nil, errno

@@ -139,9 +260,8 @@ func NewWatcher() (*Watcher, error) {
 	w := &Watcher{
 		fd:          fd,
 		inotifyFile: os.NewFile(uintptr(fd), ""),
-		watches:     make(map[string]*watch),
-		paths:       make(map[int]string),
-		Events:      make(chan Event),
+		watches:     newWatches(),
+		Events:      make(chan Event, sz),
 		Errors:      make(chan error),
 		done:        make(chan struct{}),
 		doneResp:    make(chan struct{}),

@@ -157,8 +277,8 @@ func (w *Watcher) sendEvent(e Event) bool {
 	case w.Events <- e:
 		return true
 	case <-w.done:
+		return false
 	}
-	return false
 }

 // Returns true if the error was sent, or false if watcher is closed.

@@ -180,17 +300,15 @@ func (w *Watcher) isClosed() bool {
 	}
 }

-// Close removes all watches and closes the events channel.
+// Close removes all watches and closes the Events channel.
 func (w *Watcher) Close() error {
-	w.mu.Lock()
+	w.closeMu.Lock()
 	if w.isClosed() {
-		w.mu.Unlock()
+		w.closeMu.Unlock()
 		return nil
 	}
-
-	// Send 'close' signal to goroutine, and set the Watcher to closed.
 	close(w.done)
-	w.mu.Unlock()
+	w.closeMu.Unlock()

 	// Causes any blocking reads to return with an error, provided the file
 	// still supports deadline operations.

(The hunk @@ -207,17 +325,21 @@ repeats the Add doc-comment changes shown for backend_fen.go, including the new "Returns [ErrClosed]" and "See [Watcher.AddWith]" notes; the start of @@ -227,44 +349,59 @@ repeats the "# Watching files" doc-comment changes.)

@@ -227,44 +349,59 @@ func (w *Watcher) Close() error {
-func (w *Watcher) Add(name string) error {
-	name = filepath.Clean(name)
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+//     other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
 	if w.isClosed() {
-		return errors.New("inotify instance already closed")
+		return ErrClosed
 	}

+	name = filepath.Clean(name)
+	_ = getOptions(opts...)
+
 	var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
 		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
 		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF

-	w.mu.Lock()
-	defer w.mu.Unlock()
-	watchEntry := w.watches[name]
-	if watchEntry != nil {
-		flags |= watchEntry.flags | unix.IN_MASK_ADD
-	}
-	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
-	if wd == -1 {
-		return errno
-	}
-
-	if watchEntry == nil {
-		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
-		w.paths[wd] = name
-	} else {
-		watchEntry.wd = uint32(wd)
-		watchEntry.flags = flags
-	}
-
-	return nil
+	return w.watches.updatePath(name, func(existing *watch) (*watch, error) {
+		if existing != nil {
+			flags |= existing.flags | unix.IN_MASK_ADD
+		}
+
+		wd, err := unix.InotifyAddWatch(w.fd, name, flags)
+		if wd == -1 {
+			return nil, err
+		}
+
+		if existing == nil {
+			return &watch{
+				wd:    uint32(wd),
+				path:  name,
+				flags: flags,
+			}, nil
+		}
+
+		existing.wd = uint32(wd)
+		existing.flags = flags
+		return existing, nil
+	})
 }

 // Remove stops monitoring the path for changes.

@@ -273,32 +410,22 @@ func (w *Watcher) Add(name string) error {
 // /tmp/dir and /tmp/dir/subdir then you will need to remove both.
 //
 // Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
 func (w *Watcher) Remove(name string) error {
-	name = filepath.Clean(name)
-
-	// Fetch the watch.
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	watch, ok := w.watches[name]
-
-	// Remove it from inotify.
+	if w.isClosed() {
+		return nil
+	}
+	return w.remove(filepath.Clean(name))
+}
+
+func (w *Watcher) remove(name string) error {
+	wd, ok := w.watches.removePath(name)
 	if !ok {
 		return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
 	}

-	// We successfully removed the watch if InotifyRmWatch doesn't return an
-	// error, we need to clean up our internal state to ensure it matches
-	// inotify's kernel state.
-	delete(w.paths, int(watch.wd))
-	delete(w.watches, name)
-
-	// inotify_rm_watch will return EINVAL if the file has been deleted;
-	// the inotify will already have been removed.
-	// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
-	// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
-	// so that EINVAL means that the wd is being rm_watch()ed or its file removed
-	// by another thread and we have not received IN_IGNORE event.
-	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+	success, errno := unix.InotifyRmWatch(w.fd, wd)
 	if success == -1 {
 		// TODO: Perhaps it's not helpful to return an error here in every case;
 		// The only two possible errors are:

@@ -312,26 +439,26 @@ func (w *Watcher) Remove(name string) error {
 		// are watching is deleted.
 		return errno
 	}
-
 	return nil
 }

-// WatchList returns all paths added with [Add] (and are not yet removed).
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
 func (w *Watcher) WatchList() []string {
-	w.mu.Lock()
-	defer w.mu.Unlock()
+	if w.isClosed() {
+		return nil
+	}

-	entries := make([]string, 0, len(w.watches))
-	for pathname := range w.watches {
+	entries := make([]string, 0, w.watches.len())
+	w.watches.mu.RLock()
+	for pathname := range w.watches.path {
 		entries = append(entries, pathname)
 	}
+	w.watches.mu.RUnlock()

 	return entries
 }
-
-type watch struct {
-	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
-	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
-}

 // readEvents reads from the inotify file descriptor, converts the

@@ -367,14 +494,11 @@ func (w *Watcher) readEvents() {
 		if n < unix.SizeofInotifyEvent {
 			var err error
 			if n == 0 {
-				// If EOF is received. This should really never happen.
-				err = io.EOF
+				err = io.EOF // If EOF is received. This should really never happen.
 			} else if n < 0 {
-				// If an error occurred while reading.
-				err = errno
+				err = errno // If an error occurred while reading.
 			} else {
-				// Read was too short.
-				err = errors.New("notify: short read in readEvents()")
+				err = errors.New("notify: short read in readEvents()") // Read was too short.
 			}
 			if !w.sendError(err) {
 				return

@@ -403,18 +527,29 @@ func (w *Watcher) readEvents() {
 			// doesn't append the filename to the event, but we would like to always fill the
 			// the "Name" field with a valid filename. We retrieve the path of the watch from
 			// the "paths" map.
-			w.mu.Lock()
-			name, ok := w.paths[int(raw.Wd)]
-			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
-			// This is a sign to clean up the maps, otherwise we are no longer in sync
-			// with the inotify kernel state which has already deleted the watch
-			// automatically.
-			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
-				delete(w.paths, int(raw.Wd))
-				delete(w.watches, name)
-			}
-			w.mu.Unlock()
+			watch := w.watches.byWd(uint32(raw.Wd))
+
+			// inotify will automatically remove the watch on deletes; just need
+			// to clean our state here.
+			if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				w.watches.remove(watch.wd)
+			}
+			// We can't really update the state when a watched path is moved;
+			// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
+			// the watch.
+			if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
+				err := w.remove(watch.path)
+				if err != nil && !errors.Is(err, ErrNonExistentWatch) {
+					if !w.sendError(err) {
+						return
+					}
+				}
+			}
+
+			var name string
+			if watch != nil {
+				name = watch.path
+			}
 			if nameLen > 0 {
 				// Point "bytes" at the first byte of the filename
 				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
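The inotify backend now tracks its state with the small `watches` index shown above: one RWMutex guarding a wd-to-watch map and a path-to-wd map that are always updated together. A standalone sketch of that pattern, detached from fsnotify's internal types and using hypothetical names:

```go
package main

import (
	"fmt"
	"sync"
)

// pathIndex mirrors the shape of the backend's watches type: a bidirectional
// wd <-> path mapping kept consistent under a single lock.
type pathIndex struct {
	mu   sync.RWMutex
	wd   map[uint32]string // wd -> path
	path map[string]uint32 // path -> wd
}

func newPathIndex() *pathIndex {
	return &pathIndex{wd: map[uint32]string{}, path: map[string]uint32{}}
}

func (i *pathIndex) add(wd uint32, path string) {
	i.mu.Lock()
	defer i.mu.Unlock()
	i.wd[wd] = path
	i.path[path] = wd
}

func (i *pathIndex) removePath(path string) (uint32, bool) {
	i.mu.Lock()
	defer i.mu.Unlock()
	wd, ok := i.path[path]
	if !ok {
		return 0, false
	}
	// Delete from both maps so they never disagree.
	delete(i.path, path)
	delete(i.wd, wd)
	return wd, true
}

func main() {
	idx := newPathIndex()
	idx.add(1, "/tmp/dir")
	wd, ok := idx.removePath("/tmp/dir")
	fmt.Println(wd, ok) // 1 true
}
```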
293
vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
generated
vendored
293
vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
generated
vendored
@@ -1,12 +1,14 @@
 //go:build freebsd || openbsd || netbsd || dragonfly || darwin
 // +build freebsd openbsd netbsd dragonfly darwin
 
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
+
 package fsnotify
 
 import (
     "errors"
     "fmt"
-    "io/ioutil"
     "os"
     "path/filepath"
     "sync"
@@ -24,9 +26,9 @@ import (
 // When a file is removed a Remove event won't be emitted until all file
 // descriptors are closed, and deletes will always emit a Chmod. For example:
 //
 //     fp := os.Open("file")
 //     os.Remove("file")        // Triggers Chmod
 //     fp.Close()               // Triggers Remove
 //
 // This is the event that inotify sends, so not much can be changed about this.
 //
@@ -40,16 +42,16 @@ import (
 // To increase them you can use sysctl or write the value to the /proc file:
 //
 //     # Default values on Linux 5.18
 //     sysctl fs.inotify.max_user_watches=124983
 //     sysctl fs.inotify.max_user_instances=128
 //
 // To make the changes persist on reboot edit /etc/sysctl.conf or
 // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
 // your distro's documentation):
 //
 //     fs.inotify.max_user_watches=124983
 //     fs.inotify.max_user_instances=128
 //
 // Reaching the limit will result in a "no space left on device" or "too many open
 // files" error.
@@ -65,14 +67,20 @@ import (
 // control the maximum number of open files, as well as /etc/login.conf on BSD
 // systems.
 //
-// # macOS notes
+// # Windows notes
 //
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
 //
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
 type Watcher struct {
     // Events sends the filesystem change events.
     //
@@ -99,18 +107,27 @@ type Watcher struct {
     //                  initiated by the user may show up as one or multiple
     //                  writes, depending on when the system syncs things to
     //                  disk. For example when compiling a large Go program
-    //                  you may get hundreds of Write events, so you
-    //                  probably want to wait until you've stopped receiving
-    //                  them (see the dedup example in cmd/fsnotify).
+    //                  you may get hundreds of Write events, and you may
+    //                  want to wait until you've stopped receiving them
+    //                  (see the dedup example in cmd/fsnotify).
+    //
+    //                  Some systems may send Write event for directories
+    //                  when the directory content changes.
     //
     //  fsnotify.Chmod  Attributes were changed. On Linux this is also sent
     //                  when a file is removed (or more accurately, when a
     //                  link to an inode is removed). On kqueue it's sent
-    //                  and on kqueue when a file is truncated. On Windows
-    //                  it's never sent.
+    //                  when a file is truncated. On Windows it's never
+    //                  sent.
     Events chan Event
 
     // Errors sends any errors.
+    //
+    // ErrEventOverflow is used to indicate there are too many events:
+    //
+    //  - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+    //  - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+    //  - kqueue, fen: Not used.
     Errors chan error
 
     done chan struct{}
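The doc comment above notes that one save can surface as a burst of Write events and points at the dedup example in cmd/fsnotify. As a rough illustration only (not the upstream example), this sketch debounces Writes per file; the /tmp watch path and the 100ms settle window are arbitrary choices.

package main

import (
    "log"
    "sync"
    "time"

    "github.com/fsnotify/fsnotify"
)

func main() {
    w, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()

    if err := w.Add("/tmp"); err != nil { // arbitrary example path
        log.Fatal(err)
    }

    var (
        mu     sync.Mutex
        timers = map[string]*time.Timer{}
    )
    for {
        select {
        case err, ok := <-w.Errors:
            if !ok {
                return
            }
            log.Println("error:", err)
        case e, ok := <-w.Events:
            if !ok {
                return
            }
            if !e.Has(fsnotify.Write) {
                continue
            }
            // Restart a short per-file timer on every Write; the callback
            // runs only after ~100ms of quiet, so one burst = one action.
            mu.Lock()
            if t, ok := timers[e.Name]; ok {
                t.Reset(100 * time.Millisecond)
            } else {
                name := e.Name
                timers[name] = time.AfterFunc(100*time.Millisecond, func() {
                    log.Println("settled write:", name)
                    mu.Lock()
                    delete(timers, name)
                    mu.Unlock()
                })
            }
            mu.Unlock()
        }
    }
}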
@@ -133,6 +150,18 @@ type pathInfo struct {
 
 // NewWatcher creates a new Watcher.
 func NewWatcher() (*Watcher, error) {
+    return NewBufferedWatcher(0)
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
     kq, closepipe, err := newKqueue()
     if err != nil {
         return nil, err
@@ -147,7 +176,7 @@ func NewWatcher() (*Watcher, error) {
         paths:       make(map[int]pathInfo),
         fileExists:  make(map[string]struct{}),
         userWatches: make(map[string]struct{}),
-        Events:      make(chan Event),
+        Events:      make(chan Event, sz),
         Errors:      make(chan error),
         done:        make(chan struct{}),
     }
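The hunk above routes NewWatcher through the new NewBufferedWatcher(0) and sizes the Events channel from its argument. From the caller's side, a minimal usage sketch might look like this; the buffered variant is shown commented out, and the 4096-event buffer is an arbitrary example value.

package main

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    // Unbuffered watcher: the doc comment above says this performs better
    // for almost all use cases.
    w, err := fsnotify.NewWatcher()
    // If events can outrun the consumer and the kernel buffers can't be
    // raised, the buffered constructor is the fallback:
    //  w, err := fsnotify.NewBufferedWatcher(4096)
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()

    // Drain errors so the watcher never blocks on the Errors channel.
    go func() {
        for err := range w.Errors {
            log.Println("error:", err)
        }
    }()

    if err := w.Add("."); err != nil {
        log.Fatal(err)
    }
    for e := range w.Events {
        log.Println(e)
    }
}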
@@ -197,8 +226,8 @@ func (w *Watcher) sendEvent(e Event) bool {
     case w.Events <- e:
         return true
     case <-w.done:
+        return false
     }
-    return false
 }
 
 // Returns true if the error was sent, or false if watcher is closed.
@@ -207,11 +236,11 @@ func (w *Watcher) sendError(err error) bool {
     case w.Errors <- err:
         return true
     case <-w.done:
+        return false
     }
-    return false
 }
 
-// Close removes all watches and closes the events channel.
+// Close removes all watches and closes the Events channel.
 func (w *Watcher) Close() error {
     w.mu.Lock()
     if w.isClosed {
@@ -239,17 +268,21 @@ func (w *Watcher) Close() error {
 
 // Add starts monitoring the path for changes.
 //
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
 //
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
 //
 // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
 // filesystems (/proc, /sys, etc.) generally don't work.
 //
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
 // # Watching directories
 //
 // All files in a directory are monitored, including new files that are created
@@ -259,15 +292,28 @@ func (w *Watcher) Close() error {
 // # Watching files
 //
 // Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
 //
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+//     other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+    _ = getOptions(opts...)
+
     w.mu.Lock()
     w.userWatches[name] = struct{}{}
     w.mu.Unlock()
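The rewritten comment above recommends watching the parent directory and filtering on Event.Name rather than watching a single file. A minimal sketch of that pattern, assuming an arbitrary target path (/etc/hosts) and that exact-match filtering on the cleaned path is enough for the use case:

package main

import (
    "log"
    "path/filepath"

    "github.com/fsnotify/fsnotify"
)

// watchFile watches the directory containing path and reacts only to events
// for that one file, as the comment above recommends.
func watchFile(path string) error {
    path = filepath.Clean(path)

    w, err := fsnotify.NewWatcher()
    if err != nil {
        return err
    }
    defer w.Close()

    if err := w.Add(filepath.Dir(path)); err != nil {
        return err
    }

    for {
        select {
        case err, ok := <-w.Errors:
            if !ok {
                return nil
            }
            log.Println("error:", err)
        case e, ok := <-w.Events:
            if !ok {
                return nil
            }
            if filepath.Clean(e.Name) != path {
                continue // some other file in the same directory
            }
            if e.Has(fsnotify.Write) || e.Has(fsnotify.Create) {
                log.Println("changed:", e)
            }
        }
    }
}

func main() {
    if err := watchFile("/etc/hosts"); err != nil { // arbitrary example path
        log.Fatal(err)
    }
}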
@@ -281,9 +327,19 @@ func (w *Watcher) Add(name string) error {
 // /tmp/dir and /tmp/dir/subdir then you will need to remove both.
 //
 // Removing a path that has not yet been added returns [ErrNonExistentWatch].
+//
+// Returns nil if [Watcher.Close] was called.
 func (w *Watcher) Remove(name string) error {
+    return w.remove(name, true)
+}
+
+func (w *Watcher) remove(name string, unwatchFiles bool) error {
     name = filepath.Clean(name)
     w.mu.Lock()
+    if w.isClosed {
+        w.mu.Unlock()
+        return nil
+    }
     watchfd, ok := w.watches[name]
     w.mu.Unlock()
     if !ok {
@@ -315,7 +371,7 @@ func (w *Watcher) Remove(name string) error {
     w.mu.Unlock()
 
     // Find all watched paths that are in this directory that are not external.
-    if isDir {
+    if unwatchFiles && isDir {
         var pathsToRemove []string
         w.mu.Lock()
         for fd := range w.watchesByDir[name] {
@@ -326,20 +382,25 @@ func (w *Watcher) Remove(name string) error {
         }
         w.mu.Unlock()
         for _, name := range pathsToRemove {
-            // Since these are internal, not much sense in propagating error
-            // to the user, as that will just confuse them with an error about
-            // a path they did not explicitly watch themselves.
+            // Since these are internal, not much sense in propagating error to
+            // the user, as that will just confuse them with an error about a
+            // path they did not explicitly watch themselves.
             w.Remove(name)
         }
     }
 
     return nil
 }
 
-// WatchList returns all paths added with [Add] (and are not yet removed).
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
 func (w *Watcher) WatchList() []string {
     w.mu.Lock()
     defer w.mu.Unlock()
+    if w.isClosed {
+        return nil
+    }
+
     entries := make([]string, 0, len(w.userWatches))
     for pathname := range w.userWatches {
@@ -352,18 +413,18 @@ func (w *Watcher) WatchList() []string {
 // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
 const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
 
-// addWatch adds name to the watched file set.
-// The flags are interpreted as described in kevent(2).
-// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+// addWatch adds name to the watched file set; the flags are interpreted as
+// described in kevent(2).
+//
+// Returns the real path to the file which was added, with symlinks resolved.
 func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
     var isDir bool
-    // Make ./name and name equivalent
     name = filepath.Clean(name)
 
     w.mu.Lock()
     if w.isClosed {
         w.mu.Unlock()
-        return "", errors.New("kevent instance already closed")
+        return "", ErrClosed
     }
     watchfd, alreadyWatching := w.watches[name]
     // We already have a watch, but we can still override flags.
@@ -383,27 +444,30 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
         return "", nil
     }
 
-    // Follow Symlinks
-    //
-    // Linux can add unresolvable symlinks to the watch list without issue,
-    // and Windows can't do symlinks period. To maintain consistency, we
-    // will act like everything is fine if the link can't be resolved.
-    // There will simply be no file events for broken symlinks. Hence the
-    // returns of nil on errors.
+    // Follow Symlinks.
     if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
-        name, err = filepath.EvalSymlinks(name)
+        link, err := os.Readlink(name)
         if err != nil {
+            // Return nil because Linux can add unresolvable symlinks to the
+            // watch list without problems, so maintain consistency with
+            // that. There will be no file events for broken symlinks.
+            // TODO: more specific check; returns os.PathError; ENOENT?
             return "", nil
         }
 
         w.mu.Lock()
-        _, alreadyWatching = w.watches[name]
+        _, alreadyWatching = w.watches[link]
         w.mu.Unlock()
 
         if alreadyWatching {
-            return name, nil
+            // Add to watches so we don't get spurious Create events later
+            // on when we diff the directories.
+            w.watches[name] = 0
+            w.fileExists[name] = struct{}{}
+            return link, nil
         }
 
+        name = link
         fi, err = os.Lstat(name)
         if err != nil {
            return "", nil
@@ -411,7 +475,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
     }
 
     // Retry on EINTR; open() can return EINTR in practice on macOS.
-    // See #354, and go issues 11180 and 39237.
+    // See #354, and Go issues 11180 and 39237.
     for {
         watchfd, err = unix.Open(name, openMode, 0)
         if err == nil {
@@ -444,14 +508,13 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
             w.watchesByDir[parentName] = watchesByDir
         }
         watchesByDir[watchfd] = struct{}{}
-
         w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
         w.mu.Unlock()
     }
 
     if isDir {
-        // Watch the directory if it has not been watched before,
-        // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+        // Watch the directory if it has not been watched before, or if it was
+        // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
         w.mu.Lock()
 
         watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
@@ -473,13 +536,10 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
 // Event values that it sends down the Events channel.
 func (w *Watcher) readEvents() {
     defer func() {
-        err := unix.Close(w.kq)
-        if err != nil {
-            w.Errors <- err
-        }
-        unix.Close(w.closepipe[0])
         close(w.Events)
         close(w.Errors)
+        _ = unix.Close(w.kq)
+        unix.Close(w.closepipe[0])
     }()
 
     eventBuffer := make([]unix.Kevent_t, 10)
@@ -513,18 +573,8 @@ func (w *Watcher) readEvents() {
 
             event := w.newEvent(path.name, mask)
 
-            if path.isDir && !event.Has(Remove) {
-                // Double check to make sure the directory exists. This can
-                // happen when we do a rm -fr on a recursively watched folders
-                // and we receive a modification event first but the folder has
-                // been deleted and later receive the delete event.
-                if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
-                    event.Op |= Remove
-                }
-            }
-
             if event.Has(Rename) || event.Has(Remove) {
-                w.Remove(event.Name)
+                w.remove(event.Name, false)
                 w.mu.Lock()
                 delete(w.fileExists, event.Name)
                 w.mu.Unlock()
@@ -540,26 +590,30 @@ func (w *Watcher) readEvents() {
             }
 
             if event.Has(Remove) {
-                // Look for a file that may have overwritten this.
-                // For example, mv f1 f2 will delete f2, then create f2.
+                // Look for a file that may have overwritten this; for example,
+                // mv f1 f2 will delete f2, then create f2.
                 if path.isDir {
                     fileDir := filepath.Clean(event.Name)
                     w.mu.Lock()
                     _, found := w.watches[fileDir]
                     w.mu.Unlock()
                     if found {
-                        // make sure the directory exists before we watch for changes. When we
-                        // do a recursive watch and perform rm -fr, the parent directory might
-                        // have gone missing, ignore the missing directory and let the
-                        // upcoming delete event remove the watch from the parent directory.
-                        if _, err := os.Lstat(fileDir); err == nil {
-                            w.sendDirectoryChangeEvents(fileDir)
+                        err := w.sendDirectoryChangeEvents(fileDir)
+                        if err != nil {
+                            if !w.sendError(err) {
+                                closed = true
+                            }
                         }
                     }
                 } else {
                     filePath := filepath.Clean(event.Name)
-                    if fileInfo, err := os.Lstat(filePath); err == nil {
-                        w.sendFileCreatedEventIfNew(filePath, fileInfo)
+                    if fi, err := os.Lstat(filePath); err == nil {
+                        err := w.sendFileCreatedEventIfNew(filePath, fi)
+                        if err != nil {
+                            if !w.sendError(err) {
+                                closed = true
+                            }
+                        }
                     }
                 }
             }
@@ -582,21 +636,31 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
     if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
         e.Op |= Chmod
     }
+    // No point sending a write and delete event at the same time: if it's gone,
+    // then it's gone.
+    if e.Op.Has(Write) && e.Op.Has(Remove) {
+        e.Op &^= Write
+    }
     return e
 }
 
 // watchDirectoryFiles to mimic inotify when adding a watch on a directory
 func (w *Watcher) watchDirectoryFiles(dirPath string) error {
     // Get all files
-    files, err := ioutil.ReadDir(dirPath)
+    files, err := os.ReadDir(dirPath)
     if err != nil {
         return err
     }
 
-    for _, fileInfo := range files {
-        path := filepath.Join(dirPath, fileInfo.Name())
+    for _, f := range files {
+        path := filepath.Join(dirPath, f.Name())
 
-        cleanPath, err := w.internalWatch(path, fileInfo)
+        fi, err := f.Info()
+        if err != nil {
+            return fmt.Errorf("%q: %w", path, err)
+        }
+
+        cleanPath, err := w.internalWatch(path, fi)
         if err != nil {
             // No permission to read the file; that's not a problem: just skip.
             // But do add it to w.fileExists to prevent it from being picked up
@@ -606,7 +670,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
         case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
             cleanPath = filepath.Clean(path)
         default:
-            return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err)
+            return fmt.Errorf("%q: %w", path, err)
         }
     }
 
@@ -622,26 +686,37 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error {
 //
 // This functionality is to have the BSD watcher match the inotify, which sends
 // a create event for files created in a watched directory.
-func (w *Watcher) sendDirectoryChangeEvents(dir string) {
-    // Get all files
-    files, err := ioutil.ReadDir(dir)
+func (w *Watcher) sendDirectoryChangeEvents(dir string) error {
+    files, err := os.ReadDir(dir)
     if err != nil {
-        if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
-            return
+        // Directory no longer exists: we can ignore this safely. kqueue will
+        // still give us the correct events.
+        if errors.Is(err, os.ErrNotExist) {
+            return nil
         }
+        return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
     }
 
-    // Search for new files
-    for _, fi := range files {
-        err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
+    for _, f := range files {
+        fi, err := f.Info()
         if err != nil {
-            return
+            return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
+        }
+
+        err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
+        if err != nil {
+            // Don't need to send an error if this file isn't readable.
+            if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
+                return nil
+            }
+            return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)
         }
     }
+    return nil
 }
 
 // sendFileCreatedEvent sends a create event if the file isn't already being tracked.
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) {
     w.mu.Lock()
     _, doesExist := w.fileExists[filePath]
     w.mu.Unlock()
@@ -652,7 +727,7 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
     }
 
     // like watchDirectoryFiles (but without doing another ReadDir)
-    filePath, err = w.internalWatch(filePath, fileInfo)
+    filePath, err = w.internalWatch(filePath, fi)
     if err != nil {
         return err
     }
@@ -664,10 +739,10 @@ func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInf
     return nil
 }
 
-func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
-    if fileInfo.IsDir() {
-        // mimic Linux providing delete events for subdirectories
-        // but preserve the flags used if currently watching subdirectory
+func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) {
+    if fi.IsDir() {
+        // mimic Linux providing delete events for subdirectories, but preserve
+        // the flags used if currently watching subdirectory
         w.mu.Lock()
         flags := w.dirFlags[name]
         w.mu.Unlock()
vendor/github.com/fsnotify/fsnotify/backend_other.go (generated, vendored): 203 changes
@@ -1,39 +1,169 @@
-//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
-// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
+// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
 
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
+
 package fsnotify
 
-import (
-    "fmt"
-    "runtime"
-)
+import "errors"
 
-// Watcher watches a set of files, delivering events to a channel.
-type Watcher struct{}
+// Watcher watches a set of paths, delivering events on a channel.
+//
+// A watcher should not be copied (e.g. pass it by pointer, rather than by
+// value).
+//
+// # Linux notes
+//
+// When a file is removed a Remove event won't be emitted until all file
+// descriptors are closed, and deletes will always emit a Chmod. For example:
+//
+//     fp := os.Open("file")
+//     os.Remove("file")        // Triggers Chmod
+//     fp.Close()               // Triggers Remove
+//
+// This is the event that inotify sends, so not much can be changed about this.
+//
+// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
+// for the number of watches per user, and fs.inotify.max_user_instances
+// specifies the maximum number of inotify instances per user. Every Watcher you
+// create is an "instance", and every path you add is a "watch".
+//
+// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
+// /proc/sys/fs/inotify/max_user_instances
+//
+// To increase them you can use sysctl or write the value to the /proc file:
+//
+//     # Default values on Linux 5.18
+//     sysctl fs.inotify.max_user_watches=124983
+//     sysctl fs.inotify.max_user_instances=128
+//
+// To make the changes persist on reboot edit /etc/sysctl.conf or
+// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
+// your distro's documentation):
+//
+//     fs.inotify.max_user_watches=124983
+//     fs.inotify.max_user_instances=128
+//
+// Reaching the limit will result in a "no space left on device" or "too many open
+// files" error.
+//
+// # kqueue notes (macOS, BSD)
+//
+// kqueue requires opening a file descriptor for every file that's being watched;
+// so if you're watching a directory with five files then that's six file
+// descriptors. You will run in to your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+    // Events sends the filesystem change events.
+    //
+    // fsnotify can send the following events; a "path" here can refer to a
+    // file, directory, symbolic link, or special file like a FIFO.
+    //
+    //  fsnotify.Create  A new path was created; this may be followed by one
+    //                   or more Write events if data also gets written to a
+    //                   file.
+    //
+    //  fsnotify.Remove  A path was removed.
+    //
+    //  fsnotify.Rename  A path was renamed. A rename is always sent with the
+    //                   old path as Event.Name, and a Create event will be
+    //                   sent with the new name. Renames are only sent for
+    //                   paths that are currently watched; e.g. moving an
+    //                   unmonitored file into a monitored directory will
+    //                   show up as just a Create. Similarly, renaming a file
+    //                   to outside a monitored directory will show up as
+    //                   only a Rename.
+    //
+    //  fsnotify.Write   A file or named pipe was written to. A Truncate will
+    //                   also trigger a Write. A single "write action"
+    //                   initiated by the user may show up as one or multiple
+    //                   writes, depending on when the system syncs things to
+    //                   disk. For example when compiling a large Go program
+    //                   you may get hundreds of Write events, and you may
+    //                   want to wait until you've stopped receiving them
+    //                   (see the dedup example in cmd/fsnotify).
+    //
+    //                   Some systems may send Write event for directories
+    //                   when the directory content changes.
+    //
+    //  fsnotify.Chmod   Attributes were changed. On Linux this is also sent
+    //                   when a file is removed (or more accurately, when a
+    //                   link to an inode is removed). On kqueue it's sent
+    //                   when a file is truncated. On Windows it's never
+    //                   sent.
+    Events chan Event
+
+    // Errors sends any errors.
+    //
+    // ErrEventOverflow is used to indicate there are too many events:
+    //
+    //  - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+    //  - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+    //  - kqueue, fen: Not used.
+    Errors chan error
+}
 
 // NewWatcher creates a new Watcher.
 func NewWatcher() (*Watcher, error) {
-    return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
+    return nil, errors.New("fsnotify not supported on the current platform")
 }
 
-// Close removes all watches and closes the events channel.
-func (w *Watcher) Close() error {
-    return nil
-}
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() }
+
+// Close removes all watches and closes the Events channel.
+func (w *Watcher) Close() error { return nil }
+
+// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
+// yet removed).
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) WatchList() []string { return nil }
 
 // Add starts monitoring the path for changes.
 //
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
 //
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
 //
 // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
 // filesystems (/proc, /sys, etc.) generally don't work.
 //
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
 // # Watching directories
 //
 // All files in a directory are monitored, including new files that are created
@@ -43,17 +173,26 @@ func (w *Watcher) Close() error {
 // # Watching files
 //
 // Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
 //
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
-    return nil
-}
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return nil }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+//     other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil }
 
 // Remove stops monitoring the path for changes.
 //
@@ -61,6 +200,6 @@ func (w *Watcher) Add(name string) error {
 // /tmp/dir and /tmp/dir/subdir then you will need to remove both.
 //
 // Removing a path that has not yet been added returns [ErrNonExistentWatch].
-func (w *Watcher) Remove(name string) error {
-    return nil
-}
+//
+// Returns nil if [Watcher.Close] was called.
+func (w *Watcher) Remove(name string) error { return nil }
vendor/github.com/fsnotify/fsnotify/backend_windows.go (generated, vendored): 245 changes
@@ -1,6 +1,13 @@
 //go:build windows
 // +build windows
 
+// Windows backend based on ReadDirectoryChangesW()
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
+//
+// Note: the documentation on the Watcher type and methods is generated from
+// mkdoc.zsh
+
 package fsnotify
 
 import (
@@ -27,9 +34,9 @@ import (
 // When a file is removed a Remove event won't be emitted until all file
 // descriptors are closed, and deletes will always emit a Chmod. For example:
 //
 //     fp := os.Open("file")
 //     os.Remove("file")        // Triggers Chmod
 //     fp.Close()               // Triggers Remove
 //
 // This is the event that inotify sends, so not much can be changed about this.
 //
@@ -43,16 +50,16 @@ import (
 // To increase them you can use sysctl or write the value to the /proc file:
 //
 //     # Default values on Linux 5.18
 //     sysctl fs.inotify.max_user_watches=124983
 //     sysctl fs.inotify.max_user_instances=128
 //
 // To make the changes persist on reboot edit /etc/sysctl.conf or
 // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
 // your distro's documentation):
 //
 //     fs.inotify.max_user_watches=124983
 //     fs.inotify.max_user_instances=128
 //
 // Reaching the limit will result in a "no space left on device" or "too many open
 // files" error.
@@ -68,14 +75,20 @@ import (
 // control the maximum number of open files, as well as /etc/login.conf on BSD
 // systems.
 //
-// # macOS notes
+// # Windows notes
 //
-// Spotlight indexing on macOS can result in multiple events (see [#15]). A
-// temporary workaround is to add your folder(s) to the "Spotlight Privacy
-// Settings" until we have a native FSEvents implementation (see [#11]).
+// Paths can be added as "C:\path\to\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
 //
-// [#11]: https://github.com/fsnotify/fsnotify/issues/11
-// [#15]: https://github.com/fsnotify/fsnotify/issues/15
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all times, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
 type Watcher struct {
     // Events sends the filesystem change events.
     //
@@ -102,31 +115,52 @@ type Watcher struct {
     //                  initiated by the user may show up as one or multiple
     //                  writes, depending on when the system syncs things to
     //                  disk. For example when compiling a large Go program
-    //                  you may get hundreds of Write events, so you
-    //                  probably want to wait until you've stopped receiving
-    //                  them (see the dedup example in cmd/fsnotify).
+    //                  you may get hundreds of Write events, and you may
+    //                  want to wait until you've stopped receiving them
+    //                  (see the dedup example in cmd/fsnotify).
+    //
+    //                  Some systems may send Write event for directories
+    //                  when the directory content changes.
     //
     //  fsnotify.Chmod  Attributes were changed. On Linux this is also sent
     //                  when a file is removed (or more accurately, when a
     //                  link to an inode is removed). On kqueue it's sent
-    //                  and on kqueue when a file is truncated. On Windows
-    //                  it's never sent.
+    //                  when a file is truncated. On Windows it's never
+    //                  sent.
     Events chan Event
 
     // Errors sends any errors.
+    //
+    // ErrEventOverflow is used to indicate there are too many events:
+    //
+    //  - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
+    //  - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
+    //  - kqueue, fen: Not used.
    Errors chan error
 
     port  windows.Handle // Handle to completion port
     input chan *input    // Inputs to the reader are sent on this channel
     quit  chan chan<- error
 
-    mu       sync.Mutex // Protects access to watches, isClosed
+    mu       sync.Mutex // Protects access to watches, closed
     watches  watchMap   // Map of watches (key: i-number)
-    isClosed bool       // Set to true when Close() is first called
+    closed   bool       // Set to true when Close() is first called
 }
 
 // NewWatcher creates a new Watcher.
 func NewWatcher() (*Watcher, error) {
+    return NewBufferedWatcher(50)
+}
+
+// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
+// channel.
+//
+// The main use case for this is situations with a very large number of events
+// where the kernel buffer size can't be increased (e.g. due to lack of
+// permissions). An unbuffered Watcher will perform better for almost all use
+// cases, and whenever possible you will be better off increasing the kernel
+// buffers instead of adding a large userspace buffer.
+func NewBufferedWatcher(sz uint) (*Watcher, error) {
     port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
     if err != nil {
         return nil, os.NewSyscallError("CreateIoCompletionPort", err)
@@ -135,7 +169,7 @@ func NewWatcher() (*Watcher, error) {
         port:    port,
         watches: make(watchMap),
         input:   make(chan *input, 1),
-        Events:  make(chan Event, 50),
+        Events:  make(chan Event, sz),
         Errors:  make(chan error),
         quit:    make(chan chan<- error, 1),
     }
@@ -143,6 +177,12 @@ func NewWatcher() (*Watcher, error) {
     return w, nil
 }
 
+func (w *Watcher) isClosed() bool {
+    w.mu.Lock()
+    defer w.mu.Unlock()
+    return w.closed
+}
+
 func (w *Watcher) sendEvent(name string, mask uint64) bool {
     if mask == 0 {
         return false
@@ -167,14 +207,14 @@ func (w *Watcher) sendError(err error) bool {
     return false
 }
 
-// Close removes all watches and closes the events channel.
+// Close removes all watches and closes the Events channel.
 func (w *Watcher) Close() error {
-    w.mu.Lock()
-    if w.isClosed {
-        w.mu.Unlock()
+    if w.isClosed() {
         return nil
     }
-    w.isClosed = true
+
+    w.mu.Lock()
+    w.closed = true
     w.mu.Unlock()
 
     // Send "quit" message to the reader goroutine
@@ -188,17 +228,21 @@ func (w *Watcher) Close() error {
 
 // Add starts monitoring the path for changes.
 //
-// A path can only be watched once; attempting to watch it more than once will
-// return an error. Paths that do not yet exist on the filesystem cannot be
-// added. A watch will be automatically removed if the path is deleted.
+// A path can only be watched once; watching it more than once is a no-op and will
+// not return an error. Paths that do not yet exist on the filesystem cannot be
+// watched.
 //
-// A path will remain watched if it gets renamed to somewhere else on the same
-// filesystem, but the monitor will get removed if the path gets deleted and
-// re-created, or if it's moved to a different filesystem.
+// A watch will be automatically removed if the watched path is deleted or
+// renamed. The exception is the Windows backend, which doesn't remove the
+// watcher on renames.
 //
 // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
 // filesystems (/proc, /sys, etc.) generally don't work.
 //
+// Returns [ErrClosed] if [Watcher.Close] was called.
+//
+// See [Watcher.AddWith] for a version that allows adding options.
+//
 // # Watching directories
 //
 // All files in a directory are monitored, including new files that are created
@@ -208,27 +252,41 @@ func (w *Watcher) Close() error {
 // # Watching files
 //
 // Watching individual files (rather than directories) is generally not
-// recommended as many tools update files atomically. Instead of "just" writing
-// to the file a temporary file will be written to first, and if successful the
-// temporary file is moved to to destination removing the original, or some
-// variant thereof. The watcher on the original file is now lost, as it no
-// longer exists.
+// recommended as many programs (especially editors) update files atomically: it
+// will write to a temporary file which is then moved to to destination,
+// overwriting the original (or some variant thereof). The watcher on the
+// original file is now lost, as that no longer exists.
 //
-// Instead, watch the parent directory and use Event.Name to filter out files
-// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
-func (w *Watcher) Add(name string) error {
-    w.mu.Lock()
-    if w.isClosed {
-        w.mu.Unlock()
-        return errors.New("watcher already closed")
+// The upshot of this is that a power failure or crash won't leave a
+// half-written file.
+//
+// Watch the parent directory and use Event.Name to filter out files you're not
+// interested in. There is an example of this in cmd/fsnotify/file.go.
+func (w *Watcher) Add(name string) error { return w.AddWith(name) }
+
+// AddWith is like [Watcher.Add], but allows adding options. When using Add()
+// the defaults described below are used.
+//
+// Possible options are:
+//
+//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
+//     other platforms. The default is 64K (65536 bytes).
+func (w *Watcher) AddWith(name string, opts ...addOpt) error {
+    if w.isClosed() {
+        return ErrClosed
+    }
+
+    with := getOptions(opts...)
+    if with.bufsize < 4096 {
+        return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
     }
-    w.mu.Unlock()
 
     in := &input{
         op:    opAddWatch,
         path:  filepath.Clean(name),
         flags: sysFSALLEVENTS,
         reply: make(chan error),
+        bufsize: with.bufsize,
     }
     w.input <- in
     if err := w.wakeupReader(); err != nil {
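The new AddWith path above rejects buffers smaller than 4096 bytes and carries with.bufsize into the watch. From the caller's side, a hedged sketch of requesting a larger Windows buffer; it assumes the exported option is WithBufferSize taking a byte count, as the doc comment suggests, and the 256K value and C:\path\to\dir path are arbitrary examples.

package main

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    w, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()

    // On a busy directory (e.g. over SMB) the default 64K
    // ReadDirectoryChangesW buffer may overflow; ask for a bigger one.
    // On non-Windows platforms the option is documented above as a no-op.
    err = w.AddWith(`C:\path\to\dir`, fsnotify.WithBufferSize(256*1024))
    if err != nil {
        log.Fatal(err)
    }

    for {
        select {
        case e := <-w.Events:
            log.Println(e)
        case err := <-w.Errors:
            log.Println("error:", err)
        }
    }
}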
@ -243,7 +301,13 @@ func (w *Watcher) Add(name string) error {
|
|||||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||||
//
|
//
|
||||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||||
|
//
|
||||||
|
// Returns nil if [Watcher.Close] was called.
|
||||||
func (w *Watcher) Remove(name string) error {
|
func (w *Watcher) Remove(name string) error {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
in := &input{
|
in := &input{
|
||||||
op: opRemoveWatch,
|
op: opRemoveWatch,
|
||||||
path: filepath.Clean(name),
|
path: filepath.Clean(name),
|
||||||
@ -256,8 +320,15 @@ func (w *Watcher) Remove(name string) error {
|
|||||||
return <-in.reply
|
return <-in.reply
|
||||||
}
|
}
|
||||||
|
|
||||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||||
|
// yet removed).
|
||||||
|
//
|
||||||
|
// Returns nil if [Watcher.Close] was called.
|
||||||
func (w *Watcher) WatchList() []string {
|
func (w *Watcher) WatchList() []string {
|
||||||
|
if w.isClosed() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
w.mu.Lock()
|
w.mu.Lock()
|
||||||
defer w.mu.Unlock()
|
defer w.mu.Unlock()
|
||||||
|
|
||||||
@@ -279,7 +350,6 @@ func (w *Watcher) WatchList() []string {
 // This should all be removed at some point, and just use windows.FILE_NOTIFY_*
 const (
 	sysFSALLEVENTS  = 0xfff
-	sysFSATTRIB     = 0x4
 	sysFSCREATE     = 0x100
 	sysFSDELETE     = 0x200
 	sysFSDELETESELF = 0x400
@@ -305,9 +375,6 @@ func (w *Watcher) newEvent(name string, mask uint32) Event {
 	if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
 		e.Op |= Rename
 	}
-	if mask&sysFSATTRIB == sysFSATTRIB {
-		e.Op |= Chmod
-	}
 	return e
 }
@@ -321,10 +388,11 @@ const (
 )
 
 type input struct {
 	op      int
 	path    string
 	flags   uint32
+	bufsize int
 	reply   chan error
 }
 
 type inode struct {
@@ -334,13 +402,14 @@ type inode struct {
 }
 
 type watch struct {
 	ov      windows.Overlapped
 	ino     *inode            // i-number
+	recurse bool              // Recursive watch?
 	path    string            // Directory path
 	mask    uint64            // Directory itself is being watched with these notify flags
 	names   map[string]uint64 // Map of names being watched and their notify flags
 	rename  string            // Remembers the old name while renaming a file
-	buf     [65536]byte       // 64K buffer
+	buf     []byte            // buffer, allocated later
 }
 
 type (
@ -413,7 +482,10 @@ func (m watchMap) set(ino *inode, watch *watch) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Must run within the I/O thread.
|
// Must run within the I/O thread.
|
||||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error {
|
||||||
|
//pathname, recurse := recursivePath(pathname)
|
||||||
|
recurse := false
|
||||||
|
|
||||||
dir, err := w.getDir(pathname)
|
dir, err := w.getDir(pathname)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -433,9 +505,11 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
|||||||
return os.NewSyscallError("CreateIoCompletionPort", err)
|
return os.NewSyscallError("CreateIoCompletionPort", err)
|
||||||
}
|
}
|
||||||
watchEntry = &watch{
|
watchEntry = &watch{
|
||||||
ino: ino,
|
ino: ino,
|
||||||
path: dir,
|
path: dir,
|
||||||
names: make(map[string]uint64),
|
names: make(map[string]uint64),
|
||||||
|
recurse: recurse,
|
||||||
|
buf: make([]byte, bufsize),
|
||||||
}
|
}
|
||||||
w.mu.Lock()
|
w.mu.Lock()
|
||||||
w.watches.set(ino, watchEntry)
|
w.watches.set(ino, watchEntry)
|
||||||
@ -465,6 +539,8 @@ func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
|||||||
|
|
||||||
// Must run within the I/O thread.
|
// Must run within the I/O thread.
|
||||||
func (w *Watcher) remWatch(pathname string) error {
|
func (w *Watcher) remWatch(pathname string) error {
|
||||||
|
pathname, recurse := recursivePath(pathname)
|
||||||
|
|
||||||
dir, err := w.getDir(pathname)
|
dir, err := w.getDir(pathname)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -478,6 +554,10 @@ func (w *Watcher) remWatch(pathname string) error {
|
|||||||
watch := w.watches.get(ino)
|
watch := w.watches.get(ino)
|
||||||
w.mu.Unlock()
|
w.mu.Unlock()
|
||||||
|
|
||||||
|
if recurse && !watch.recurse {
|
||||||
|
return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
|
||||||
|
}
|
||||||
|
|
||||||
err = windows.CloseHandle(ino.handle)
|
err = windows.CloseHandle(ino.handle)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
w.sendError(os.NewSyscallError("CloseHandle", err))
|
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||||
@ -535,8 +615,11 @@ func (w *Watcher) startRead(watch *watch) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
// We need to pass the array, rather than the slice.
|
||||||
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
|
||||||
|
rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
|
||||||
|
(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
|
||||||
|
watch.recurse, mask, nil, &watch.ov, 0)
|
||||||
if rdErr != nil {
|
if rdErr != nil {
|
||||||
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
|
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
|
||||||
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||||
@ -563,9 +646,8 @@ func (w *Watcher) readEvents() {
|
|||||||
runtime.LockOSThread()
|
runtime.LockOSThread()
|
||||||
|
|
||||||
for {
|
for {
|
||||||
|
// This error is handled after the watch == nil check below.
|
||||||
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
|
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
|
||||||
// This error is handled after the watch == nil check below. NOTE: this
|
|
||||||
// seems odd, note sure if it's correct.
|
|
||||||
|
|
||||||
watch := (*watch)(unsafe.Pointer(ov))
|
watch := (*watch)(unsafe.Pointer(ov))
|
||||||
if watch == nil {
|
if watch == nil {
|
||||||
@ -595,7 +677,7 @@ func (w *Watcher) readEvents() {
|
|||||||
case in := <-w.input:
|
case in := <-w.input:
|
||||||
switch in.op {
|
switch in.op {
|
||||||
case opAddWatch:
|
case opAddWatch:
|
||||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
|
||||||
case opRemoveWatch:
|
case opRemoveWatch:
|
||||||
in.reply <- w.remWatch(in.path)
|
in.reply <- w.remWatch(in.path)
|
||||||
}
|
}
|
||||||
@ -605,6 +687,8 @@ func (w *Watcher) readEvents() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
switch qErr {
|
switch qErr {
|
||||||
|
case nil:
|
||||||
|
// No error
|
||||||
case windows.ERROR_MORE_DATA:
|
case windows.ERROR_MORE_DATA:
|
||||||
if watch == nil {
|
if watch == nil {
|
||||||
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
|
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
|
||||||
@ -626,13 +710,12 @@ func (w *Watcher) readEvents() {
|
|||||||
default:
|
default:
|
||||||
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
|
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
|
||||||
continue
|
continue
|
||||||
case nil:
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var offset uint32
|
var offset uint32
|
||||||
for {
|
for {
|
||||||
if n == 0 {
|
if n == 0 {
|
||||||
w.sendError(errors.New("short read in readEvents()"))
|
w.sendError(ErrEventOverflow)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -703,8 +786,9 @@ func (w *Watcher) readEvents() {
|
|||||||
|
|
||||||
// Error!
|
// Error!
|
||||||
if offset >= n {
|
if offset >= n {
|
||||||
|
//lint:ignore ST1005 Windows should be capitalized
|
||||||
w.sendError(errors.New(
|
w.sendError(errors.New(
|
||||||
"Windows system assumed buffer larger than it is, events have likely been missed."))
|
"Windows system assumed buffer larger than it is, events have likely been missed"))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -720,9 +804,6 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
|
|||||||
if mask&sysFSMODIFY != 0 {
|
if mask&sysFSMODIFY != 0 {
|
||||||
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
|
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||||
}
|
}
|
||||||
if mask&sysFSATTRIB != 0 {
|
|
||||||
m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
|
||||||
}
|
|
||||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||||
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
|
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||||
}
|
}
|
||||||
|
91  vendor/github.com/fsnotify/fsnotify/fsnotify.go  (generated, vendored)
@ -1,13 +1,18 @@
|
|||||||
//go:build !plan9
|
|
||||||
// +build !plan9
|
|
||||||
|
|
||||||
// Package fsnotify provides a cross-platform interface for file system
|
// Package fsnotify provides a cross-platform interface for file system
|
||||||
// notifications.
|
// notifications.
|
||||||
|
//
|
||||||
|
// Currently supported systems:
|
||||||
|
//
|
||||||
|
// Linux 2.6.32+ via inotify
|
||||||
|
// BSD, macOS via kqueue
|
||||||
|
// Windows via ReadDirectoryChangesW
|
||||||
|
// illumos via FEN
|
||||||
package fsnotify
|
package fsnotify
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -33,34 +38,52 @@ type Op uint32
|
|||||||
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
|
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
|
||||||
// full description, and check them with [Event.Has].
|
// full description, and check them with [Event.Has].
|
||||||
const (
|
const (
|
||||||
|
// A new pathname was created.
|
||||||
Create Op = 1 << iota
|
Create Op = 1 << iota
|
||||||
|
|
||||||
|
// The pathname was written to; this does *not* mean the write has finished,
|
||||||
|
// and a write can be followed by more writes.
|
||||||
Write
|
Write
|
||||||
|
|
||||||
|
// The path was removed; any watches on it will be removed. Some "remove"
|
||||||
|
// operations may trigger a Rename if the file is actually moved (for
|
||||||
|
// example "remove to trash" is often a rename).
|
||||||
Remove
|
Remove
|
||||||
|
|
||||||
|
// The path was renamed to something else; any watched on it will be
|
||||||
|
// removed.
|
||||||
Rename
|
Rename
|
||||||
|
|
||||||
|
// File attributes were changed.
|
||||||
|
//
|
||||||
|
// It's generally not recommended to take action on this event, as it may
|
||||||
|
// get triggered very frequently by some software. For example, Spotlight
|
||||||
|
// indexing on macOS, anti-virus software, backup software, etc.
|
||||||
Chmod
|
Chmod
|
||||||
)
|
)
|
||||||
|
|
||||||
// Common errors that can be reported by a watcher
|
// Common errors that can be reported.
|
||||||
var (
|
var (
|
||||||
ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
|
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
|
||||||
ErrEventOverflow = errors.New("fsnotify queue overflow")
|
ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
|
||||||
|
ErrClosed = errors.New("fsnotify: watcher already closed")
|
||||||
)
|
)
|
||||||
|
|
||||||
func (op Op) String() string {
|
func (o Op) String() string {
|
||||||
var b strings.Builder
|
var b strings.Builder
|
||||||
if op.Has(Create) {
|
if o.Has(Create) {
|
||||||
b.WriteString("|CREATE")
|
b.WriteString("|CREATE")
|
||||||
}
|
}
|
||||||
if op.Has(Remove) {
|
if o.Has(Remove) {
|
||||||
b.WriteString("|REMOVE")
|
b.WriteString("|REMOVE")
|
||||||
}
|
}
|
||||||
if op.Has(Write) {
|
if o.Has(Write) {
|
||||||
b.WriteString("|WRITE")
|
b.WriteString("|WRITE")
|
||||||
}
|
}
|
||||||
if op.Has(Rename) {
|
if o.Has(Rename) {
|
||||||
b.WriteString("|RENAME")
|
b.WriteString("|RENAME")
|
||||||
}
|
}
|
||||||
if op.Has(Chmod) {
|
if o.Has(Chmod) {
|
||||||
b.WriteString("|CHMOD")
|
b.WriteString("|CHMOD")
|
||||||
}
|
}
|
||||||
if b.Len() == 0 {
|
if b.Len() == 0 {
|
||||||
@ -70,7 +93,7 @@ func (op Op) String() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Has reports if this operation has the given operation.
|
// Has reports if this operation has the given operation.
|
||||||
func (o Op) Has(h Op) bool { return o&h == h }
|
func (o Op) Has(h Op) bool { return o&h != 0 }
|
||||||
|
|
||||||
// Has reports if this event has the given operation.
|
// Has reports if this event has the given operation.
|
||||||
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
|
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
|
||||||
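The change to Op.Has above (from o&h == h to o&h != 0) means Has now matches if any bit of the argument is set, so it can be used with a combined mask. A short illustration, assuming only the exported Op constants:

```go
package example

import "github.com/fsnotify/fsnotify"

// isCreateOrWrite relies on the new Op.Has semantics: a combined mask matches
// an event carrying at least one of the bits. Under the old o&h == h check the
// event would have needed *both* Create and Write set.
func isCreateOrWrite(ev fsnotify.Event) bool {
	return ev.Has(fsnotify.Create | fsnotify.Write)
}
```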
@ -79,3 +102,45 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) }
|
|||||||
func (e Event) String() string {
|
func (e Event) String() string {
|
||||||
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
|
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
addOpt func(opt *withOpts)
|
||||||
|
withOpts struct {
|
||||||
|
bufsize int
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
var defaultOpts = withOpts{
|
||||||
|
bufsize: 65536, // 64K
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOptions(opts ...addOpt) withOpts {
|
||||||
|
with := defaultOpts
|
||||||
|
for _, o := range opts {
|
||||||
|
o(&with)
|
||||||
|
}
|
||||||
|
return with
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
|
||||||
|
//
|
||||||
|
// This only has effect on Windows systems, and is a no-op for other backends.
|
||||||
|
//
|
||||||
|
// The default value is 64K (65536 bytes) which is the highest value that works
|
||||||
|
// on all filesystems and should be enough for most applications, but if you
|
||||||
|
// have a large burst of events it may not be enough. You can increase it if
|
||||||
|
// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
|
||||||
|
//
|
||||||
|
// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
|
||||||
|
func WithBufferSize(bytes int) addOpt {
|
||||||
|
return func(opt *withOpts) { opt.bufsize = bytes }
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this path is recursive (ends with "/..." or "\..."), and return the
|
||||||
|
// path with the /... stripped.
|
||||||
|
func recursivePath(path string) (string, bool) {
|
||||||
|
if filepath.Base(path) == "..." {
|
||||||
|
return filepath.Dir(path), true
|
||||||
|
}
|
||||||
|
return path, false
|
||||||
|
}
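The AddWith/WithBufferSize pair introduced above replaces the fixed 64K ReadDirectoryChangesW buffer. A sketch of raising it for a busy directory; the 256 KiB size is an illustrative choice:

```go
package example

import "github.com/fsnotify/fsnotify"

// watchBusyDir adds dir with a 256 KiB buffer. WithBufferSize only affects the
// Windows backend and is a documented no-op elsewhere, so the call is portable.
func watchBusyDir(w *fsnotify.Watcher, dir string) error {
	return w.AddWith(dir, fsnotify.WithBufferSize(256*1024))
}
```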
|
||||||
|
123  vendor/github.com/fsnotify/fsnotify/mkdoc.zsh  (generated, vendored)
@ -2,8 +2,8 @@
|
|||||||
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
|
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
|
||||||
setopt err_exit no_unset pipefail extended_glob
|
setopt err_exit no_unset pipefail extended_glob
|
||||||
|
|
||||||
# Simple script to update the godoc comments on all watchers. Probably took me
|
# Simple script to update the godoc comments on all watchers so you don't need
|
||||||
# more time to write this than doing it manually, but ah well 🙃
|
# to update the same comment 5 times.
|
||||||
|
|
||||||
watcher=$(<<EOF
|
watcher=$(<<EOF
|
||||||
// Watcher watches a set of paths, delivering events on a channel.
|
// Watcher watches a set of paths, delivering events on a channel.
|
||||||
@ -16,9 +16,9 @@ watcher=$(<<EOF
|
|||||||
// When a file is removed a Remove event won't be emitted until all file
|
// When a file is removed a Remove event won't be emitted until all file
|
||||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||||
//
|
//
|
||||||
// fp := os.Open("file")
|
// fp := os.Open("file")
|
||||||
// os.Remove("file") // Triggers Chmod
|
// os.Remove("file") // Triggers Chmod
|
||||||
// fp.Close() // Triggers Remove
|
// fp.Close() // Triggers Remove
|
||||||
//
|
//
|
||||||
// This is the event that inotify sends, so not much can be changed about this.
|
// This is the event that inotify sends, so not much can be changed about this.
|
||||||
//
|
//
|
||||||
@ -32,16 +32,16 @@ watcher=$(<<EOF
|
|||||||
//
|
//
|
||||||
// To increase them you can use sysctl or write the value to the /proc file:
|
// To increase them you can use sysctl or write the value to the /proc file:
|
||||||
//
|
//
|
||||||
// # Default values on Linux 5.18
|
// # Default values on Linux 5.18
|
||||||
// sysctl fs.inotify.max_user_watches=124983
|
// sysctl fs.inotify.max_user_watches=124983
|
||||||
// sysctl fs.inotify.max_user_instances=128
|
// sysctl fs.inotify.max_user_instances=128
|
||||||
//
|
//
|
||||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||||
// your distro's documentation):
|
// your distro's documentation):
|
||||||
//
|
//
|
||||||
// fs.inotify.max_user_watches=124983
|
// fs.inotify.max_user_watches=124983
|
||||||
// fs.inotify.max_user_instances=128
|
// fs.inotify.max_user_instances=128
|
||||||
//
|
//
|
||||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||||
// files" error.
|
// files" error.
|
||||||
@ -57,14 +57,20 @@ watcher=$(<<EOF
|
|||||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||||
// systems.
|
// systems.
|
||||||
//
|
//
|
||||||
// # macOS notes
|
// # Windows notes
|
||||||
//
|
//
|
||||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
|
||||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
// ("C:/path/to/dir") will also work.
|
||||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
|
||||||
//
|
//
|
||||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
// When a watched directory is removed it will always send an event for the
|
||||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
// directory itself, but may not send events for all files in that directory.
|
||||||
|
// Sometimes it will send events for all times, sometimes it will send no
|
||||||
|
// events, and often only for some files.
|
||||||
|
//
|
||||||
|
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||||
|
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||||
|
// events in quick succession this may not be enough, and you will have to use
|
||||||
|
// [WithBufferSize] to increase the value.
|
||||||
EOF
|
EOF
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -73,20 +79,36 @@ new=$(<<EOF
|
|||||||
EOF
|
EOF
|
||||||
)
|
)
|
||||||
|
|
||||||
|
newbuffered=$(<<EOF
|
||||||
|
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
|
||||||
|
// channel.
|
||||||
|
//
|
||||||
|
// The main use case for this is situations with a very large number of events
|
||||||
|
// where the kernel buffer size can't be increased (e.g. due to lack of
|
||||||
|
// permissions). An unbuffered Watcher will perform better for almost all use
|
||||||
|
// cases, and whenever possible you will be better off increasing the kernel
|
||||||
|
// buffers instead of adding a large userspace buffer.
|
||||||
|
EOF
|
||||||
|
)
|
||||||
|
|
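The NewBufferedWatcher comment added above describes a buffered Events channel for bursty workloads. A minimal sketch, assuming the constructor takes the channel capacity as its single argument as in fsnotify v1.7:

```go
package example

import "github.com/fsnotify/fsnotify"

// newBurstWatcher buffers up to 4096 events in userspace. As the comment above
// notes, prefer the unbuffered NewWatcher plus larger kernel buffers when possible.
func newBurstWatcher() (*fsnotify.Watcher, error) {
	return fsnotify.NewBufferedWatcher(4096)
}
```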
||||||
add=$(<<EOF
|
add=$(<<EOF
|
||||||
// Add starts monitoring the path for changes.
|
// Add starts monitoring the path for changes.
|
||||||
//
|
//
|
||||||
// A path can only be watched once; attempting to watch it more than once will
|
// A path can only be watched once; watching it more than once is a no-op and will
|
||||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
// not return an error. Paths that do not yet exist on the filesystem cannot be
|
||||||
// added. A watch will be automatically removed if the path is deleted.
|
// watched.
|
||||||
//
|
//
|
||||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
// A watch will be automatically removed if the watched path is deleted or
|
||||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
// renamed. The exception is the Windows backend, which doesn't remove the
|
||||||
// re-created, or if it's moved to a different filesystem.
|
// watcher on renames.
|
||||||
//
|
//
|
||||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||||
//
|
//
|
||||||
|
// Returns [ErrClosed] if [Watcher.Close] was called.
|
||||||
|
//
|
||||||
|
// See [Watcher.AddWith] for a version that allows adding options.
|
||||||
|
//
|
||||||
// # Watching directories
|
// # Watching directories
|
||||||
//
|
//
|
||||||
// All files in a directory are monitored, including new files that are created
|
// All files in a directory are monitored, including new files that are created
|
||||||
@ -96,14 +118,27 @@ add=$(<<EOF
|
|||||||
// # Watching files
|
// # Watching files
|
||||||
//
|
//
|
||||||
// Watching individual files (rather than directories) is generally not
|
// Watching individual files (rather than directories) is generally not
|
||||||
// recommended as many tools update files atomically. Instead of "just" writing
|
// recommended as many programs (especially editors) update files atomically: it
|
||||||
// to the file a temporary file will be written to first, and if successful the
|
// will write to a temporary file which is then moved to to destination,
|
||||||
// temporary file is moved to to destination removing the original, or some
|
// overwriting the original (or some variant thereof). The watcher on the
|
||||||
// variant thereof. The watcher on the original file is now lost, as it no
|
// original file is now lost, as that no longer exists.
|
||||||
// longer exists.
|
|
||||||
//
|
//
|
||||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
// The upshot of this is that a power failure or crash won't leave a
|
||||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
// half-written file.
|
||||||
|
//
|
||||||
|
// Watch the parent directory and use Event.Name to filter out files you're not
|
||||||
|
// interested in. There is an example of this in cmd/fsnotify/file.go.
|
||||||
|
EOF
|
||||||
|
)
|
||||||
|
|
||||||
|
addwith=$(<<EOF
|
||||||
|
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
|
||||||
|
// the defaults described below are used.
|
||||||
|
//
|
||||||
|
// Possible options are:
|
||||||
|
//
|
||||||
|
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
|
||||||
|
// other platforms. The default is 64K (65536 bytes).
|
||||||
EOF
|
EOF
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -114,16 +149,21 @@ remove=$(<<EOF
|
|||||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||||
//
|
//
|
||||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||||
|
//
|
||||||
|
// Returns nil if [Watcher.Close] was called.
|
||||||
EOF
|
EOF
|
||||||
)
|
)
|
||||||
|
|
||||||
close=$(<<EOF
|
close=$(<<EOF
|
||||||
// Close removes all watches and closes the events channel.
|
// Close removes all watches and closes the Events channel.
|
||||||
EOF
|
EOF
|
||||||
)
|
)
|
||||||
|
|
||||||
watchlist=$(<<EOF
|
watchlist=$(<<EOF
|
||||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
|
||||||
|
// yet removed).
|
||||||
|
//
|
||||||
|
// Returns nil if [Watcher.Close] was called.
|
||||||
EOF
|
EOF
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -153,20 +193,29 @@ events=$(<<EOF
|
|||||||
// initiated by the user may show up as one or multiple
|
// initiated by the user may show up as one or multiple
|
||||||
// writes, depending on when the system syncs things to
|
// writes, depending on when the system syncs things to
|
||||||
// disk. For example when compiling a large Go program
|
// disk. For example when compiling a large Go program
|
||||||
// you may get hundreds of Write events, so you
|
// you may get hundreds of Write events, and you may
|
||||||
// probably want to wait until you've stopped receiving
|
// want to wait until you've stopped receiving them
|
||||||
// them (see the dedup example in cmd/fsnotify).
|
// (see the dedup example in cmd/fsnotify).
|
||||||
|
//
|
||||||
|
// Some systems may send Write event for directories
|
||||||
|
// when the directory content changes.
|
||||||
//
|
//
|
||||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||||
// when a file is removed (or more accurately, when a
|
// when a file is removed (or more accurately, when a
|
||||||
// link to an inode is removed). On kqueue it's sent
|
// link to an inode is removed). On kqueue it's sent
|
||||||
// and on kqueue when a file is truncated. On Windows
|
// when a file is truncated. On Windows it's never
|
||||||
// it's never sent.
|
// sent.
|
||||||
EOF
|
EOF
|
||||||
)
|
)
|
||||||
|
|
||||||
errors=$(<<EOF
|
errors=$(<<EOF
|
||||||
// Errors sends any errors.
|
// Errors sends any errors.
|
||||||
|
//
|
||||||
|
// ErrEventOverflow is used to indicate there are too many events:
|
||||||
|
//
|
||||||
|
// - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl)
|
||||||
|
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
|
||||||
|
// - kqueue, fen: Not used.
|
||||||
EOF
|
EOF
|
||||||
)
|
)
|
||||||
|
|
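The Errors documentation above now spells out when ErrEventOverflow is reported. A sketch of draining the channel and reacting to overflow; the resync callback is an application-level assumption, not part of the library:

```go
package example

import (
	"errors"
	"log"

	"github.com/fsnotify/fsnotify"
)

// drainErrors logs watcher errors; on overflow events were dropped, so the
// caller-supplied resync hook should rescan the watched tree.
func drainErrors(w *fsnotify.Watcher, resync func()) {
	for err := range w.Errors {
		if errors.Is(err, fsnotify.ErrEventOverflow) {
			log.Println("event queue/buffer overflow; rescanning")
			resync() // hypothetical application hook
			continue
		}
		log.Println("watch error:", err)
	}
}
```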
||||||
@ -200,7 +249,9 @@ set-cmt() {
|
|||||||
|
|
||||||
set-cmt '^type Watcher struct ' $watcher
|
set-cmt '^type Watcher struct ' $watcher
|
||||||
set-cmt '^func NewWatcher(' $new
|
set-cmt '^func NewWatcher(' $new
|
||||||
|
set-cmt '^func NewBufferedWatcher(' $newbuffered
|
||||||
set-cmt '^func (w \*Watcher) Add(' $add
|
set-cmt '^func (w \*Watcher) Add(' $add
|
||||||
|
set-cmt '^func (w \*Watcher) AddWith(' $addwith
|
||||||
set-cmt '^func (w \*Watcher) Remove(' $remove
|
set-cmt '^func (w \*Watcher) Remove(' $remove
|
||||||
set-cmt '^func (w \*Watcher) Close(' $close
|
set-cmt '^func (w \*Watcher) Close(' $close
|
||||||
set-cmt '^func (w \*Watcher) WatchList(' $watchlist
|
set-cmt '^func (w \*Watcher) WatchList(' $watchlist
|
||||||
|
2  vendor/github.com/gomarkdown/markdown/README.md  (generated, vendored)
@ -226,7 +226,7 @@ implements the following extensions:
|
|||||||
- **Hard line breaks**. With this extension enabled newlines in the input
|
- **Hard line breaks**. With this extension enabled newlines in the input
|
||||||
translates into line breaks in the output. This extension is off by default.
|
translates into line breaks in the output. This extension is off by default.
|
||||||
|
|
||||||
- **Non blocking space**. With this extension enabled spaces preceeded by a backslash
|
- **Non blocking space**. With this extension enabled spaces preceded by a backslash
|
||||||
in the input translates non-blocking spaces in the output. This extension is off by default.
|
in the input translates non-blocking spaces in the output. This extension is off by default.
|
||||||
|
|
||||||
- **Smart quotes**. Smartypants-style punctuation substitution is
|
- **Smart quotes**. Smartypants-style punctuation substitution is
|
||||||
|
7  vendor/github.com/gomarkdown/markdown/parser/block.go  (generated, vendored)
@ -191,6 +191,11 @@ func (p *Parser) Block(data []byte) {
|
|||||||
// <div>
|
// <div>
|
||||||
// ...
|
// ...
|
||||||
// </div>
|
// </div>
|
||||||
|
|
||||||
|
if len(data) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
if data[0] == '<' {
|
if data[0] == '<' {
|
||||||
if i := p.html(data, true); i > 0 {
|
if i := p.html(data, true); i > 0 {
|
||||||
data = data[i:]
|
data = data[i:]
|
||||||
@ -393,7 +398,7 @@ func (p *Parser) AddBlock(n ast.Node) ast.Node {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *Parser) isPrefixHeading(data []byte) bool {
|
func (p *Parser) isPrefixHeading(data []byte) bool {
|
||||||
if data[0] != '#' {
|
if len(data) > 0 && data[0] != '#' {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
5  vendor/github.com/gomarkdown/markdown/parser/citation.go  (generated, vendored)
@ -65,6 +65,11 @@ func citation(p *Parser, data []byte, offset int) (int, ast.Node) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
citeType := ast.CitationTypeInformative
|
citeType := ast.CitationTypeInformative
|
||||||
|
|
||||||
|
if len(citation) < 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
j = 1
|
j = 1
|
||||||
switch citation[j] {
|
switch citation[j] {
|
||||||
case '!':
|
case '!':
|
||||||
|
2  vendor/github.com/gomarkdown/markdown/parser/inline.go  (generated, vendored)
@ -736,7 +736,7 @@ func leftAngle(p *Parser, data []byte, offset int) (int, ast.Node) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// '\\' backslash escape
|
// '\\' backslash escape
|
||||||
var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~^")
|
var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~^$")
|
||||||
|
|
||||||
func escape(p *Parser, data []byte, offset int) (int, ast.Node) {
|
func escape(p *Parser, data []byte, offset int) (int, ast.Node) {
|
||||||
data = data[offset:]
|
data = data[offset:]
|
||||||
|
6  vendor/github.com/gomarkdown/markdown/parser/parser.go  (generated, vendored)
@ -181,6 +181,12 @@ func NewWithExtensions(extension Extensions) *Parser {
|
|||||||
return &p
|
return &p
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *Parser) RegisterInline(n byte, fn inlineParser) inlineParser {
|
||||||
|
prev := p.inlineCallback[n]
|
||||||
|
p.inlineCallback[n] = fn
|
||||||
|
return prev
|
||||||
|
}
|
||||||
|
|
||||||
func (p *Parser) getRef(refid string) (ref *reference, found bool) {
|
func (p *Parser) getRef(refid string) (ref *reference, found bool) {
|
||||||
if p.ReferenceOverride != nil {
|
if p.ReferenceOverride != nil {
|
||||||
r, overridden := p.ReferenceOverride(refid)
|
r, overridden := p.ReferenceOverride(refid)
|
||||||
|
9  vendor/github.com/google/uuid/.travis.yml  (generated, vendored)
@ -1,9 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.4.3
|
|
||||||
- 1.5.3
|
|
||||||
- tip
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go test -v ./...
|
|
21  vendor/github.com/google/uuid/CHANGELOG.md  (generated, vendored, new file)
@ -0,0 +1,21 @@
|
|||||||
|
# Changelog
|
||||||
|
|
||||||
|
## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
|
||||||
|
* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
|
||||||
|
|
||||||
|
## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
|
||||||
|
|
||||||
|
## Changelog
|
16  vendor/github.com/google/uuid/CONTRIBUTING.md  (generated, vendored)
@ -2,6 +2,22 @@
|
|||||||
|
|
||||||
We definitely welcome patches and contribution to this project!
|
We definitely welcome patches and contribution to this project!
|
||||||
|
|
||||||
|
### Tips
|
||||||
|
|
||||||
|
Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
|
||||||
|
|
||||||
|
Always try to include a test case! If it is not possible or not necessary,
|
||||||
|
please explain why in the pull request description.
|
||||||
|
|
||||||
|
### Releasing
|
||||||
|
|
||||||
|
Commits that would precipitate a SemVer change, as described in the Conventional
|
||||||
|
Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
|
||||||
|
to create a release candidate pull request. Once submitted, `release-please`
|
||||||
|
will create a release.
|
||||||
|
|
||||||
|
For tips on how to work with `release-please`, see its documentation.
|
||||||
|
|
||||||
### Legal requirements
|
### Legal requirements
|
||||||
|
|
||||||
In order to protect both you and ourselves, you will need to sign the
|
In order to protect both you and ourselves, you will need to sign the
|
||||||
|
10  vendor/github.com/google/uuid/README.md  (generated, vendored)
@ -1,6 +1,6 @@
|
|||||||
# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
|
# uuid
|
||||||
The uuid package generates and inspects UUIDs based on
|
The uuid package generates and inspects UUIDs based on
|
||||||
[RFC 4122](http://tools.ietf.org/html/rfc4122)
|
[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
|
||||||
and DCE 1.1: Authentication and Security Services.
|
and DCE 1.1: Authentication and Security Services.
|
||||||
|
|
||||||
This package is based on the github.com/pborman/uuid package (previously named
|
This package is based on the github.com/pborman/uuid package (previously named
|
||||||
@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this
|
|||||||
change is the ability to represent an invalid UUID (vs a NIL UUID).
|
change is the ability to represent an invalid UUID (vs a NIL UUID).
|
||||||
|
|
||||||
###### Install
|
###### Install
|
||||||
`go get github.com/google/uuid`
|
```sh
|
||||||
|
go get github.com/google/uuid
|
||||||
|
```
|
||||||
|
|
||||||
###### Documentation
|
###### Documentation
|
||||||
[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
|
[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
|
||||||
|
|
||||||
Full `go doc` style documentation for the package can be viewed online without
|
Full `go doc` style documentation for the package can be viewed online without
|
||||||
installing this package by using the GoDoc site here:
|
installing this package by using the GoDoc site here:
|
||||||
|
2  vendor/github.com/google/uuid/node_js.go  (generated, vendored)
@ -7,6 +7,6 @@
|
|||||||
package uuid
|
package uuid
|
||||||
|
|
||||||
// getHardwareInterface returns nil values for the JS version of the code.
|
// getHardwareInterface returns nil values for the JS version of the code.
|
||||||
// This remvoves the "net" dependency, because it is not used in the browser.
|
// This removes the "net" dependency, because it is not used in the browser.
|
||||||
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
|
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
|
||||||
func getHardwareInterface(name string) (string, []byte) { return "", nil }
|
func getHardwareInterface(name string) (string, []byte) { return "", nil }
|
||||||
|
36  vendor/github.com/google/uuid/uuid.go  (generated, vendored)
@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool {
|
|||||||
return ok
|
return ok
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse decodes s into a UUID or returns an error. Both the standard UUID
|
// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
|
||||||
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
// the standard UUID forms defined in RFC 4122
|
||||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
|
// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||||
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
|
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
|
||||||
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
|
// Parse accepts non-standard strings such as the raw hex encoding
|
||||||
|
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
|
||||||
|
// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
|
||||||
|
// examined in the latter case. Parse should not be used to validate strings as
|
||||||
|
// it parses non-standard encodings as indicated above.
|
||||||
func Parse(s string) (UUID, error) {
|
func Parse(s string) (UUID, error) {
|
||||||
var uuid UUID
|
var uuid UUID
|
||||||
switch len(s) {
|
switch len(s) {
|
||||||
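The reworded Parse documentation above stresses that Parse accepts several encodings and is not a validator. A short sketch of the accepted forms; the UUID value itself is just an example:

```go
package example

import (
	"fmt"

	"github.com/google/uuid"
)

func parseForms() {
	for _, s := range []string{
		"f47ac10b-58cc-4372-8567-0e02b2c3d479",          // canonical
		"urn:uuid:f47ac10b-58cc-4372-8567-0e02b2c3d479", // URN prefix
		"{f47ac10b-58cc-4372-8567-0e02b2c3d479}",        // "Microsoft style"
		"f47ac10b58cc437285670e02b2c3d479",              // raw hex
	} {
		u, err := uuid.Parse(s)
		fmt.Println(u, err) // all four forms parse successfully
	}
}
```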
@ -69,7 +73,7 @@ func Parse(s string) (UUID, error) {
|
|||||||
|
|
||||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||||
case 36 + 9:
|
case 36 + 9:
|
||||||
if strings.ToLower(s[:9]) != "urn:uuid:" {
|
if !strings.EqualFold(s[:9], "urn:uuid:") {
|
||||||
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
|
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
|
||||||
}
|
}
|
||||||
s = s[9:]
|
s = s[9:]
|
||||||
@ -101,7 +105,8 @@ func Parse(s string) (UUID, error) {
|
|||||||
9, 11,
|
9, 11,
|
||||||
14, 16,
|
14, 16,
|
||||||
19, 21,
|
19, 21,
|
||||||
24, 26, 28, 30, 32, 34} {
|
24, 26, 28, 30, 32, 34,
|
||||||
|
} {
|
||||||
v, ok := xtob(s[x], s[x+1])
|
v, ok := xtob(s[x], s[x+1])
|
||||||
if !ok {
|
if !ok {
|
||||||
return uuid, errors.New("invalid UUID format")
|
return uuid, errors.New("invalid UUID format")
|
||||||
@ -117,7 +122,7 @@ func ParseBytes(b []byte) (UUID, error) {
|
|||||||
switch len(b) {
|
switch len(b) {
|
||||||
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||||
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||||
if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
|
if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
|
||||||
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
|
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
|
||||||
}
|
}
|
||||||
b = b[9:]
|
b = b[9:]
|
||||||
@ -145,7 +150,8 @@ func ParseBytes(b []byte) (UUID, error) {
|
|||||||
9, 11,
|
9, 11,
|
||||||
14, 16,
|
14, 16,
|
||||||
19, 21,
|
19, 21,
|
||||||
24, 26, 28, 30, 32, 34} {
|
24, 26, 28, 30, 32, 34,
|
||||||
|
} {
|
||||||
v, ok := xtob(b[x], b[x+1])
|
v, ok := xtob(b[x], b[x+1])
|
||||||
if !ok {
|
if !ok {
|
||||||
return uuid, errors.New("invalid UUID format")
|
return uuid, errors.New("invalid UUID format")
|
||||||
@ -292,3 +298,15 @@ func DisableRandPool() {
|
|||||||
poolMu.Lock()
|
poolMu.Lock()
|
||||||
poolPos = randPoolSize
|
poolPos = randPoolSize
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UUIDs is a slice of UUID types.
|
||||||
|
type UUIDs []UUID
|
||||||
|
|
||||||
|
// Strings returns a string slice containing the string form of each UUID in uuids.
|
||||||
|
func (uuids UUIDs) Strings() []string {
|
||||||
|
var uuidStrs = make([]string, len(uuids))
|
||||||
|
for i, uuid := range uuids {
|
||||||
|
uuidStrs[i] = uuid.String()
|
||||||
|
}
|
||||||
|
return uuidStrs
|
||||||
|
}
|
||||||
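The UUIDs slice type and its Strings helper added above make it easy to log or serialize a batch of IDs. A minimal sketch:

```go
package example

import "github.com/google/uuid"

// newIDStrings generates n random UUIDs and returns their string forms via the
// UUIDs.Strings convenience method introduced in v1.4.0.
func newIDStrings(n int) []string {
	ids := make(uuid.UUIDs, n)
	for i := range ids {
		ids[i] = uuid.New()
	}
	return ids.Strings()
}
```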
|
20  vendor/github.com/gorilla/schema/.editorconfig  (generated, vendored, new file)
@ -0,0 +1,20 @@
|
|||||||
|
; https://editorconfig.org/
|
||||||
|
|
||||||
|
root = true
|
||||||
|
|
||||||
|
[*]
|
||||||
|
insert_final_newline = true
|
||||||
|
charset = utf-8
|
||||||
|
trim_trailing_whitespace = true
|
||||||
|
indent_style = space
|
||||||
|
indent_size = 2
|
||||||
|
|
||||||
|
[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
|
||||||
|
indent_style = tab
|
||||||
|
indent_size = 4
|
||||||
|
|
||||||
|
[*.md]
|
||||||
|
indent_size = 4
|
||||||
|
trim_trailing_whitespace = false
|
||||||
|
|
||||||
|
eclint_indent_style = unset
|
1  vendor/github.com/gorilla/schema/.gitignore  (generated, vendored, new file)
@ -0,0 +1 @@
|
|||||||
|
coverage.coverprofile
|
2  vendor/github.com/gorilla/schema/LICENSE  (generated, vendored)
@ -1,4 +1,4 @@
|
|||||||
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
|
Copyright (c) 2023 The Gorilla Authors. All rights reserved.
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
Redistribution and use in source and binary forms, with or without
|
||||||
modification, are permitted provided that the following conditions are
|
modification, are permitted provided that the following conditions are
|
||||||
|
34  vendor/github.com/gorilla/schema/Makefile  (generated, vendored, new file)
@ -0,0 +1,34 @@
|
|||||||
|
GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
|
||||||
|
GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest
|
||||||
|
|
||||||
|
GO_SEC=$(shell which gosec 2> /dev/null || echo '')
|
||||||
|
GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest
|
||||||
|
|
||||||
|
GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
|
||||||
|
GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
|
||||||
|
.PHONY: golangci-lint
|
||||||
|
golangci-lint:
|
||||||
|
$(if $(GO_LINT), ,go install $(GO_LINT_URI))
|
||||||
|
@echo "##### Running golangci-lint"
|
||||||
|
golangci-lint run -v
|
||||||
|
|
||||||
|
.PHONY: gosec
|
||||||
|
gosec:
|
||||||
|
$(if $(GO_SEC), ,go install $(GO_SEC_URI))
|
||||||
|
@echo "##### Running gosec"
|
||||||
|
gosec ./...
|
||||||
|
|
||||||
|
.PHONY: govulncheck
|
||||||
|
govulncheck:
|
||||||
|
$(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
|
||||||
|
@echo "##### Running govulncheck"
|
||||||
|
govulncheck ./...
|
||||||
|
|
||||||
|
.PHONY: verify
|
||||||
|
verify: golangci-lint gosec govulncheck
|
||||||
|
|
||||||
|
.PHONY: test
|
||||||
|
test:
|
||||||
|
@echo "##### Running tests"
|
||||||
|
go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...
|
12  vendor/github.com/gorilla/schema/README.md  (generated, vendored)
@ -1,8 +1,12 @@
|
|||||||
schema
|
# gorilla/schema
|
||||||
======
|
|
||||||
[![GoDoc](https://godoc.org/github.com/gorilla/schema?status.svg)](https://godoc.org/github.com/gorilla/schema) [![Build Status](https://travis-ci.org/gorilla/schema.png?branch=master)](https://travis-ci.org/gorilla/schema)
|
|
||||||
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/schema/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/schema?badge)
|
|
||||||
|
|
||||||
|
![testing](https://github.com/gorilla/schema/actions/workflows/test.yml/badge.svg)
|
||||||
|
[![codecov](https://codecov.io/github/gorilla/schema/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/schema)
|
||||||
|
[![godoc](https://godoc.org/github.com/gorilla/schema?status.svg)](https://godoc.org/github.com/gorilla/schema)
|
||||||
|
[![sourcegraph](https://sourcegraph.com/github.com/gorilla/schema/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/schema?badge)
|
||||||
|
|
||||||
|
|
||||||
|
![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5)
|
||||||
|
|
||||||
Package gorilla/schema converts structs to and from form values.
|
Package gorilla/schema converts structs to and from form values.
|
||||||
|
|
||||||
|
12  vendor/github.com/gorilla/schema/cache.go  (generated, vendored)
@ -12,7 +12,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
var invalidPath = errors.New("schema: invalid path")
|
var errInvalidPath = errors.New("schema: invalid path")
|
||||||
|
|
||||||
// newCache returns a new cache.
|
// newCache returns a new cache.
|
||||||
func newCache() *cache {
|
func newCache() *cache {
|
||||||
@ -53,13 +53,13 @@ func (c *cache) parsePath(p string, t reflect.Type) ([]pathPart, error) {
|
|||||||
keys := strings.Split(p, ".")
|
keys := strings.Split(p, ".")
|
||||||
for i := 0; i < len(keys); i++ {
|
for i := 0; i < len(keys); i++ {
|
||||||
if t.Kind() != reflect.Struct {
|
if t.Kind() != reflect.Struct {
|
||||||
return nil, invalidPath
|
return nil, errInvalidPath
|
||||||
}
|
}
|
||||||
if struc = c.get(t); struc == nil {
|
if struc = c.get(t); struc == nil {
|
||||||
return nil, invalidPath
|
return nil, errInvalidPath
|
||||||
}
|
}
|
||||||
if field = struc.get(keys[i]); field == nil {
|
if field = struc.get(keys[i]); field == nil {
|
||||||
return nil, invalidPath
|
return nil, errInvalidPath
|
||||||
}
|
}
|
||||||
// Valid field. Append index.
|
// Valid field. Append index.
|
||||||
path = append(path, field.name)
|
path = append(path, field.name)
|
||||||
@ -72,10 +72,10 @@ func (c *cache) parsePath(p string, t reflect.Type) ([]pathPart, error) {
|
|||||||
// So checking i+2 is not necessary anymore.
|
// So checking i+2 is not necessary anymore.
|
||||||
i++
|
i++
|
||||||
if i+1 > len(keys) {
|
if i+1 > len(keys) {
|
||||||
return nil, invalidPath
|
return nil, errInvalidPath
|
||||||
}
|
}
|
||||||
if index64, err = strconv.ParseInt(keys[i], 10, 0); err != nil {
|
if index64, err = strconv.ParseInt(keys[i], 10, 0); err != nil {
|
||||||
return nil, invalidPath
|
return nil, errInvalidPath
|
||||||
}
|
}
|
||||||
parts = append(parts, pathPart{
|
parts = append(parts, pathPart{
|
||||||
path: path,
|
path: path,
|
||||||
|
2  vendor/github.com/gorilla/schema/decoder.go  (generated, vendored)
@ -193,7 +193,7 @@ func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values
|
|||||||
if v.Type().Kind() == reflect.Struct {
|
if v.Type().Kind() == reflect.Struct {
|
||||||
for i := 0; i < v.NumField(); i++ {
|
for i := 0; i < v.NumField(); i++ {
|
||||||
field := v.Field(i)
|
field := v.Field(i)
|
||||||
if field.Type().Kind() == reflect.Ptr && field.IsNil() && v.Type().Field(i).Anonymous == true {
|
if field.Type().Kind() == reflect.Ptr && field.IsNil() && v.Type().Field(i).Anonymous {
|
||||||
field.Set(reflect.New(field.Type().Elem()))
|
field.Set(reflect.New(field.Type().Elem()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
18  vendor/github.com/gorilla/schema/encoder.go  (generated, vendored)
@ -3,6 +3,7 @@ package schema
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
)
|
)
|
||||||
@ -93,8 +94,11 @@ func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Encode struct pointer types if the field is a valid pointer and a struct.
|
// Encode struct pointer types if the field is a valid pointer and a struct.
|
||||||
if isValidStructPointer(v.Field(i)) {
|
if isValidStructPointer(v.Field(i)) && !e.hasCustomEncoder(v.Field(i).Type()) {
|
||||||
e.encode(v.Field(i).Elem(), dst)
|
err := e.encode(v.Field(i).Elem(), dst)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -112,7 +116,10 @@ func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if v.Field(i).Type().Kind() == reflect.Struct {
|
if v.Field(i).Type().Kind() == reflect.Struct {
|
||||||
e.encode(v.Field(i), dst)
|
err := e.encode(v.Field(i), dst)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -142,6 +149,11 @@ func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (e *Encoder) hasCustomEncoder(t reflect.Type) bool {
|
||||||
|
_, exists := e.regenc[t]
|
||||||
|
return exists
|
||||||
|
}
|
||||||
|
|
||||||
func typeEncoder(t reflect.Type, reg map[reflect.Type]encoderFunc) encoderFunc {
|
func typeEncoder(t reflect.Type, reg map[reflect.Type]encoderFunc) encoderFunc {
|
||||||
if f, ok := reg[t]; ok {
|
if f, ok := reg[t]; ok {
|
||||||
return f
|
return f
|
||||||
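The hasCustomEncoder check added above makes the encoder honour a registered custom encoder for struct-pointer fields instead of recursing into them. A sketch of how that is typically used, assuming gorilla/schema's RegisterEncoder API; the Filter type and RFC 3339 formatting are illustrative:

```go
package example

import (
	"reflect"
	"time"

	"github.com/gorilla/schema"
)

type Filter struct {
	Since *time.Time `schema:"since"`
}

// newEncoder registers an encoder for *time.Time so the pointer field is
// emitted as a single RFC 3339 value rather than recursed into field by field.
func newEncoder() *schema.Encoder {
	enc := schema.NewEncoder()
	enc.RegisterEncoder(&time.Time{}, func(v reflect.Value) string {
		if t, ok := v.Interface().(*time.Time); ok && t != nil {
			return t.Format(time.RFC3339)
		}
		return ""
	})
	return enc
}
```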
|
20  vendor/github.com/klauspost/compress/.goreleaser.yml  (generated, vendored)
@ -3,7 +3,7 @@
|
|||||||
before:
|
before:
|
||||||
hooks:
|
hooks:
|
||||||
- ./gen.sh
|
- ./gen.sh
|
||||||
- go install mvdan.cc/garble@v0.9.3
|
- go install mvdan.cc/garble@v0.10.1
|
||||||
|
|
||||||
builds:
|
builds:
|
||||||
-
|
-
|
||||||
@ -92,16 +92,7 @@ builds:
|
|||||||
archives:
|
archives:
|
||||||
-
|
-
|
||||||
id: s2-binaries
|
id: s2-binaries
|
||||||
name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
|
name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||||
replacements:
|
|
||||||
aix: AIX
|
|
||||||
darwin: OSX
|
|
||||||
linux: Linux
|
|
||||||
windows: Windows
|
|
||||||
386: i386
|
|
||||||
amd64: x86_64
|
|
||||||
freebsd: FreeBSD
|
|
||||||
netbsd: NetBSD
|
|
||||||
format_overrides:
|
format_overrides:
|
||||||
- goos: windows
|
- goos: windows
|
||||||
format: zip
|
format: zip
|
||||||
@ -125,7 +116,7 @@ changelog:
|
|||||||
|
|
||||||
nfpms:
|
nfpms:
|
||||||
-
|
-
|
||||||
file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
|
file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||||
vendor: Klaus Post
|
vendor: Klaus Post
|
||||||
homepage: https://github.com/klauspost/compress
|
homepage: https://github.com/klauspost/compress
|
||||||
maintainer: Klaus Post <klauspost@gmail.com>
|
maintainer: Klaus Post <klauspost@gmail.com>
|
||||||
@ -134,8 +125,3 @@ nfpms:
|
|||||||
formats:
|
formats:
|
||||||
- deb
|
- deb
|
||||||
- rpm
|
- rpm
|
||||||
replacements:
|
|
||||||
darwin: Darwin
|
|
||||||
linux: Linux
|
|
||||||
freebsd: FreeBSD
|
|
||||||
amd64: x86_64
|
|
||||||
|
46  vendor/github.com/klauspost/compress/README.md  (generated, vendored)
@ -16,6 +16,47 @@ This package provides various compression algorithms.
|
|||||||
|
|
||||||
# changelog
|
# changelog
|
||||||
|
|
||||||
|
* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
|
||||||
|
* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
|
||||||
|
* s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
|
||||||
|
|
||||||
|
* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
|
||||||
|
* zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
|
||||||
|
* zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
|
||||||
|
* gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815
|
||||||
|
* s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663
|
||||||
|
|
||||||
|
* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5)
|
||||||
|
* zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802
|
||||||
|
* gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804
|
||||||
|
|
||||||
|
* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
|
||||||
|
* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
|
||||||
|
* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
|
||||||
|
* zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
|
||||||
|
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
|
||||||
|
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
|
||||||
|
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
|
||||||
|
* gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
|
||||||
|
|
||||||
|
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
|
||||||
|
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
|
||||||
|
* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
|
||||||
|
* s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
|
||||||
|
* zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
|
||||||
|
* huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
|
||||||
|
|
||||||
|
* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
|
||||||
|
* s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
|
||||||
|
* s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
|
||||||
|
* s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
|
||||||
|
* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
|
||||||
|
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
|
||||||
|
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
|
||||||
|
<details>
	<summary>See changes to v1.15.x</summary>

* Jan 21st, 2023 (v1.15.15)
	* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
	* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728

@ -142,6 +183,8 @@ Stream decompression is now faster on asynchronous, since the goroutine allocati

While the release has been extensively tested, it is recommended to test when upgrading.

</details>

<details>
	<summary>See changes to v1.14.x</summary>

@ -600,6 +643,9 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv

* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.

# license
25 vendor/github.com/klauspost/compress/SECURITY.md generated vendored Normal file
@ -0,0 +1,25 @@
# Security Policy

## Supported Versions

Security updates are applied only to the latest release.

## Vulnerability Definition

A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability.

Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.

Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory, with appropriate options applied, to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue.

It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.

Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround.

## Reporting a Vulnerability

If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.

Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.

This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
3 vendor/github.com/klauspost/compress/fse/bitwriter.go generated vendored
@ -152,12 +152,11 @@ func (b *bitWriter) flushAlign() {
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }
 
 // reset and continue writing by appending to out.
3 vendor/github.com/klauspost/compress/fse/compress.go generated vendored
@ -199,7 +199,8 @@ func (s *Scratch) compress(src []byte) error {
 	c2.flush(s.actualTableLog)
 	c1.flush(s.actualTableLog)
 
-	return s.bw.close()
+	s.bw.close()
+	return nil
 }
 
 // writeCount will write the normalized histogram count to header.
4 vendor/github.com/klauspost/compress/fse/decompress.go generated vendored
@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error {
 // If the buffer is over-read an error is returned.
 func (s *Scratch) decompress() error {
 	br := &s.bits
-	br.init(s.br.unread())
+	if err := br.init(s.br.unread()); err != nil {
+		return err
+	}
 
 	var s1, s2 decoder
 	// Initialize and decode first state and symbol.
27 vendor/github.com/klauspost/compress/huff0/bitwriter.go generated vendored
@ -13,14 +13,6 @@ type bitWriter struct {
 	out []byte
 }
 
-// bitMask16 is bitmasks. Has extra to avoid bounds check.
-var bitMask16 = [32]uint16{
-	0, 1, 3, 7, 0xF, 0x1F,
-	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
-	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF} /* up to 16 bits */
-
 // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
 // It will not check if there is space for them, so the caller must ensure that it has flushed recently.
 func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@ -60,6 +52,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
 	b.nBits += encA.nBits + encB.nBits
 }
 
+// encFourSymbols adds up to 32 bits from four symbols.
+// It will not check if there is space for them,
+// so the caller must ensure that b has been flushed recently.
+func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
+	bitsA := encA.nBits
+	bitsB := bitsA + encB.nBits
+	bitsC := bitsB + encC.nBits
+	bitsD := bitsC + encD.nBits
+	combined := uint64(encA.val) |
+		(uint64(encB.val) << (bitsA & 63)) |
+		(uint64(encC.val) << (bitsB & 63)) |
+		(uint64(encD.val) << (bitsC & 63))
+	b.bitContainer |= combined << (b.nBits & 63)
+	b.nBits += bitsD
+}
+
 // flush32 will flush out, so there are at least 32 bits available for writing.
 func (b *bitWriter) flush32() {
 	if b.nBits < 32 {
@ -86,10 +94,9 @@ func (b *bitWriter) flushAlign() {
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }
23 vendor/github.com/klauspost/compress/huff0/compress.go generated vendored
@ -227,10 +227,10 @@ func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err err
 }
 
 func (s *Scratch) compress1X(src []byte) ([]byte, error) {
-	return s.compress1xDo(s.Out, src)
+	return s.compress1xDo(s.Out, src), nil
 }
 
-func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
+func (s *Scratch) compress1xDo(dst, src []byte) []byte {
 	var bw = bitWriter{out: dst}
 
 	// N is length divisible by 4.
@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
 			tmp := src[n : n+4]
 			// tmp should be len 4
 			bw.flush32()
-			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
-			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+			bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
 		}
 	} else {
 		for ; n >= 0; n -= 4 {
@ -261,8 +260,8 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
 			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
 		}
 	}
-	err := bw.close()
-	return bw.out, err
+	bw.close()
+	return bw.out
 }
 
 var sixZeros [6]byte
@ -284,12 +283,8 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
 		}
 		src = src[len(toDo):]
 
-		var err error
 		idx := len(s.Out)
-		s.Out, err = s.compress1xDo(s.Out, toDo)
-		if err != nil {
-			return nil, err
-		}
+		s.Out = s.compress1xDo(s.Out, toDo)
 		if len(s.Out)-idx > math.MaxUint16 {
 			// We cannot store the size in the jump table
 			return nil, ErrIncompressible
@ -316,7 +311,6 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 
 	segmentSize := (len(src) + 3) / 4
 	var wg sync.WaitGroup
-	var errs [4]error
 	wg.Add(4)
 	for i := 0; i < 4; i++ {
 		toDo := src
@ -327,15 +321,12 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 
 		// Separate goroutine for each block.
 		go func(i int) {
-			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+			s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
 			wg.Done()
 		}(i)
 	}
 	wg.Wait()
 	for i := 0; i < 4; i++ {
-		if errs[i] != nil {
-			return nil, errs[i]
-		}
 		o := s.tmpOut[i]
 		if len(o) > math.MaxUint16 {
 			// We cannot store the size in the jump table
2 vendor/github.com/klauspost/compress/huff0/decompress.go generated vendored
@ -253,7 +253,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 
 	switch d.actualTableLog {
 	case 8:
-		const shift = 8 - 8
+		const shift = 0
 		for br.off >= 4 {
 			br.fillFast()
 			v := dt[uint8(br.value>>(56+shift))]
12 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go generated vendored
@ -87,18 +87,6 @@ func emitCopy(dst []byte, offset, length int) int {
 	return i + 2
 }
 
-// extendMatch returns the largest k such that k <= len(src) and that
-// src[i:i+k-j] and src[j:k] have the same contents.
-//
-// It assumes that:
-//
-//	0 <= i && i < j && j <= len(src)
-func extendMatch(src []byte, i, j int) int {
-	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
-	}
-	return j
-}
-
 func hash(u, shift uint32) uint32 {
 	return (u * 0x1e35a7bd) >> shift
 }
1044 vendor/github.com/klauspost/compress/s2/decode.go generated vendored
File diff suppressed because it is too large.
19 vendor/github.com/klauspost/compress/s2/dict.go generated vendored
@ -106,6 +106,25 @@ func MakeDict(data []byte, searchStart []byte) *Dict {
 	return &d
 }
 
+// MakeDictManual will create a dictionary.
+// 'data' must be at least MinDictSize and less than or equal to MaxDictSize.
+// A manual first repeat index into data must be provided.
+// It must be less than len(data)-8.
+func MakeDictManual(data []byte, firstIdx uint16) *Dict {
+	if len(data) < MinDictSize || int(firstIdx) >= len(data)-8 || len(data) > MaxDictSize {
+		return nil
+	}
+	var d Dict
+	dict := data
+	d.dict = dict
+	if cap(d.dict) < len(d.dict)+16 {
+		d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...)
+	}
+
+	d.repeat = int(firstIdx)
+	return &d
+}
+
 // Encode returns the encoded form of src. The returned slice may be a sub-
 // slice of dst if dst was large enough to hold the entire encoded block.
 // Otherwise, a newly allocated slice will be returned.
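The MakeDict/MakeDictManual additions above are easiest to see in context. Below is a minimal, hedged sketch of building a dictionary and compressing a block with it; the sample payloads are placeholders, and the use of `(*Dict).Encode` as the dictionary-aware block encoder is an assumption based on the surrounding doc comments in dict.go, not verified against the full API.

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Dictionary content: in practice shared, representative data of at least
	// MinDictSize bytes. This payload is only illustrative.
	dictData := []byte("example dictionary content example dictionary content example dictionary content")

	// MakeDict scans the content for a starting repeat; MakeDictManual (added
	// in this change) lets the caller pick the first repeat index instead.
	d := s2.MakeDict(dictData, nil)
	if d == nil {
		panic("dictionary rejected (too small or too large)")
	}

	src := []byte("example dictionary content with a little extra on top")
	// Assumed dictionary-aware block encoder; nil dst lets it allocate.
	encoded := d.Encode(nil, src)
	fmt.Println("compressed", len(src), "->", len(encoded), "bytes")
}
```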
1030 vendor/github.com/klauspost/compress/s2/encode.go generated vendored
File diff suppressed because it is too large.
1 vendor/github.com/klauspost/compress/s2/encode_all.go generated vendored
@ -742,7 +742,6 @@ searchDict:
 			x := load64(src, s-2)
 			m2Hash := hash6(x, tableBits)
 			currHash := hash6(x>>8, tableBits)
-			candidate = int(table[currHash])
 			table[m2Hash] = uint32(s - 2)
 			table[currHash] = uint32(s - 1)
 			cv = load64(src, s)
44 vendor/github.com/klauspost/compress/s2/encode_better.go generated vendored
@ -157,7 +157,6 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
 		index0 := base + 1
 		index1 := s - 2
 
-		cv = load64(src, s)
 		for index0 < index1 {
 			cv0 := load64(src, index0)
 			cv1 := load64(src, index1)
@ -269,18 +268,21 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
 			lTable[hash7(cv0, lTableBits)] = uint32(index0)
 			sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
 
+			// lTable could be postponed, but very minor difference.
 			lTable[hash7(cv1, lTableBits)] = uint32(index1)
 			sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
 			index0 += 1
 			index1 -= 1
 			cv = load64(src, s)
 
-			// index every second long in between.
-			for index0 < index1 {
+			// Index large values sparsely in between.
+			// We do two starting from different offsets for speed.
+			index2 := (index0 + index1 + 1) >> 1
+			for index2 < index1 {
 				lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
-				lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+				lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
 				index0 += 2
-				index1 -= 2
+				index2 += 2
 			}
 		}
 
@ -459,12 +461,14 @@ func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
 			index1 -= 1
 			cv = load64(src, s)
 
-			// index every second long in between.
-			for index0 < index1 {
+			// Index large values sparsely in between.
+			// We do two starting from different offsets for speed.
+			index2 := (index0 + index1 + 1) >> 1
+			for index2 < index1 {
 				lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
-				lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+				lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
 				index0 += 2
-				index1 -= 2
+				index2 += 2
 			}
 		}
 
@ -599,7 +603,6 @@ searchDict:
 			if s >= sLimit {
 				break searchDict
 			}
-			cv = load64(src, s)
 			// Index in-between
 			index0 := base + 1
 			index1 := s - 2
@ -865,12 +868,14 @@ searchDict:
 			index1 -= 1
 			cv = load64(src, s)
 
-			// index every second long in between.
-			for index0 < index1 {
+			// Index large values sparsely in between.
+			// We do two starting from different offsets for speed.
+			index2 := (index0 + index1 + 1) >> 1
+			for index2 < index1 {
 				lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
-				lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+				lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
 				index0 += 2
-				index1 -= 2
+				index2 += 2
 			}
 		}
 
@ -961,7 +966,6 @@ searchDict:
 			index0 := base + 1
 			index1 := s - 2
 
-			cv = load64(src, s)
 			for index0 < index1 {
 				cv0 := load64(src, index0)
 				cv1 := load64(src, index1)
@ -1079,12 +1083,14 @@ searchDict:
 			index1 -= 1
 			cv = load64(src, s)
 
-			// index every second long in between.
-			for index0 < index1 {
+			// Index large values sparsely in between.
+			// We do two starting from different offsets for speed.
+			index2 := (index0 + index1 + 1) >> 1
+			for index2 < index1 {
 				lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
-				lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+				lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
 				index0 += 2
-				index1 -= 2
+				index2 += 2
 			}
 		}
 
8 vendor/github.com/klauspost/compress/s2/encode_go.go generated vendored
@ -717,3 +717,11 @@ func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
 func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
 	panic("cvtLZ4BlockSnappyAsm should be unreachable")
 }
+
+func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+	panic("cvtLZ4sBlockAsm should be unreachable")
+}
+
+func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
+	panic("cvtLZ4sBlockSnappyAsm should be unreachable")
+}
12 vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go generated vendored
@ -212,7 +212,17 @@ func matchLen(a []byte, b []byte) int
 //go:noescape
 func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
 
-// cvtLZ4Block converts an LZ4 block to S2
+// cvtLZ4sBlock converts an LZ4s block to S2
+//
+//go:noescape
+func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4Block converts an LZ4 block to Snappy
 //
 //go:noescape
 func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
+
+// cvtLZ4sBlock converts an LZ4s block to Snappy
+//
+//go:noescape
+func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
4917 vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s generated vendored
File diff suppressed because it is too large.
20 vendor/github.com/klauspost/compress/s2/index.go generated vendored
@ -511,24 +511,22 @@ func IndexStream(r io.Reader) ([]byte, error) {
 
 // JSON returns the index as JSON text.
 func (i *Index) JSON() []byte {
+	type offset struct {
+		CompressedOffset   int64 `json:"compressed"`
+		UncompressedOffset int64 `json:"uncompressed"`
+	}
 	x := struct {
 		TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. Will be -1 if unknown.
 		TotalCompressed   int64 `json:"total_compressed"`   // Total Compressed size if known. Will be -1 if unknown.
-		Offsets           []struct {
-			CompressedOffset   int64 `json:"compressed"`
-			UncompressedOffset int64 `json:"uncompressed"`
-		} `json:"offsets"`
-		EstBlockUncomp int64 `json:"est_block_uncompressed"`
+		Offsets           []offset `json:"offsets"`
+		EstBlockUncomp    int64    `json:"est_block_uncompressed"`
 	}{
 		TotalUncompressed: i.TotalUncompressed,
 		TotalCompressed:   i.TotalCompressed,
 		EstBlockUncomp:    i.estBlockUncomp,
 	}
 	for _, v := range i.info {
-		x.Offsets = append(x.Offsets, struct {
-			CompressedOffset   int64 `json:"compressed"`
-			UncompressedOffset int64 `json:"uncompressed"`
-		}{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset})
+		x.Offsets = append(x.Offsets, offset{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset})
 	}
 	b, _ := json.MarshalIndent(x, "", " ")
 	return b
467 vendor/github.com/klauspost/compress/s2/lz4sconvert.go generated vendored Normal file
@ -0,0 +1,467 @@
|
|||||||
|
// Copyright (c) 2022 Klaus Post. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package s2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LZ4sConverter provides conversion from LZ4s.
|
||||||
|
// (Intel modified LZ4 Blocks)
|
||||||
|
// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf
|
||||||
|
// LZ4s is a variant of LZ4 block format. LZ4s should be considered as an intermediate compressed block format.
|
||||||
|
// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData.
|
||||||
|
// The LZ4s block returned by the Intel® QAT hardware can be used by an external
|
||||||
|
// software post-processing to generate other compressed data formats.
|
||||||
|
// The following table lists the differences between LZ4 and LZ4s block format. LZ4s block format uses
|
||||||
|
// the same high-level formatting as LZ4 block format with the following encoding changes:
|
||||||
|
// For Min Match of 4 bytes, Copy length value 1-15 means length 4-18 with 18 bytes adding an extra byte.
|
||||||
|
// ONLY "Min match of 4 bytes" is supported.
|
||||||
|
type LZ4sConverter struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConvertBlock will convert an LZ4s block and append it as an S2
|
||||||
|
// block without block length to dst.
|
||||||
|
// The uncompressed size is returned as well.
|
||||||
|
// dst must have capacity to contain the entire compressed block.
|
||||||
|
func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
|
||||||
|
if len(src) == 0 {
|
||||||
|
return dst, 0, nil
|
||||||
|
}
|
||||||
|
const debug = false
|
||||||
|
const inline = true
|
||||||
|
const lz4MinMatch = 3
|
||||||
|
|
||||||
|
s, d := 0, len(dst)
|
||||||
|
dst = dst[:cap(dst)]
|
||||||
|
if !debug && hasAmd64Asm {
|
||||||
|
res, sz := cvtLZ4sBlockAsm(dst[d:], src)
|
||||||
|
if res < 0 {
|
||||||
|
const (
|
||||||
|
errCorrupt = -1
|
||||||
|
errDstTooSmall = -2
|
||||||
|
)
|
||||||
|
switch res {
|
||||||
|
case errCorrupt:
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
case errDstTooSmall:
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
default:
|
||||||
|
return nil, 0, fmt.Errorf("unexpected result: %d", res)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if d+sz > len(dst) {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
return dst[:d+sz], res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
dLimit := len(dst) - 10
|
||||||
|
var lastOffset uint16
|
||||||
|
var uncompressed int
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
if s >= len(src) {
|
||||||
|
return dst[:d], 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
// Read literal info
|
||||||
|
token := src[s]
|
||||||
|
ll := int(token >> 4)
|
||||||
|
ml := int(lz4MinMatch + (token & 0xf))
|
||||||
|
|
||||||
|
// If upper nibble is 15, literal length is extended
|
||||||
|
if token >= 0xf0 {
|
||||||
|
for {
|
||||||
|
s++
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return dst[:d], 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
val := src[s]
|
||||||
|
ll += int(val)
|
||||||
|
if val != 255 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Skip past token
|
||||||
|
if s+ll >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
s++
|
||||||
|
if ll > 0 {
|
||||||
|
if d+ll > dLimit {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("emit %d literals\n", ll)
|
||||||
|
}
|
||||||
|
d += emitLiteralGo(dst[d:], src[s:s+ll])
|
||||||
|
s += ll
|
||||||
|
uncompressed += ll
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we are done...
|
||||||
|
if ml == lz4MinMatch {
|
||||||
|
if s == len(src) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// 0 bytes.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// 2 byte offset
|
||||||
|
if s >= len(src)-2 {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
offset := binary.LittleEndian.Uint16(src[s:])
|
||||||
|
s += 2
|
||||||
|
if offset == 0 {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
if int(offset) > uncompressed {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
|
||||||
|
if ml == lz4MinMatch+15 {
|
||||||
|
for {
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
val := src[s]
|
||||||
|
s++
|
||||||
|
ml += int(val)
|
||||||
|
if val != 255 {
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if offset == lastOffset {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
|
||||||
|
}
|
||||||
|
if !inline {
|
||||||
|
d += emitRepeat16(dst[d:], offset, ml)
|
||||||
|
} else {
|
||||||
|
length := ml
|
||||||
|
dst := dst[d:]
|
||||||
|
for len(dst) > 5 {
|
||||||
|
// Repeat offset, make length cheaper
|
||||||
|
length -= 4
|
||||||
|
if length <= 4 {
|
||||||
|
dst[0] = uint8(length)<<2 | tagCopy1
|
||||||
|
dst[1] = 0
|
||||||
|
d += 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if length < 8 && offset < 2048 {
|
||||||
|
// Encode WITH offset
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
|
||||||
|
d += 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if length < (1<<8)+4 {
|
||||||
|
length -= 4
|
||||||
|
dst[2] = uint8(length)
|
||||||
|
dst[1] = 0
|
||||||
|
dst[0] = 5<<2 | tagCopy1
|
||||||
|
d += 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if length < (1<<16)+(1<<8) {
|
||||||
|
length -= 1 << 8
|
||||||
|
dst[3] = uint8(length >> 8)
|
||||||
|
dst[2] = uint8(length >> 0)
|
||||||
|
dst[1] = 0
|
||||||
|
dst[0] = 6<<2 | tagCopy1
|
||||||
|
d += 4
|
||||||
|
break
|
||||||
|
}
|
||||||
|
const maxRepeat = (1 << 24) - 1
|
||||||
|
length -= 1 << 16
|
||||||
|
left := 0
|
||||||
|
if length > maxRepeat {
|
||||||
|
left = length - maxRepeat + 4
|
||||||
|
length = maxRepeat - 4
|
||||||
|
}
|
||||||
|
dst[4] = uint8(length >> 16)
|
||||||
|
dst[3] = uint8(length >> 8)
|
||||||
|
dst[2] = uint8(length >> 0)
|
||||||
|
dst[1] = 0
|
||||||
|
dst[0] = 7<<2 | tagCopy1
|
||||||
|
if left > 0 {
|
||||||
|
d += 5 + emitRepeat16(dst[5:], offset, left)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
d += 5
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
|
||||||
|
}
|
||||||
|
if !inline {
|
||||||
|
d += emitCopy16(dst[d:], offset, ml)
|
||||||
|
} else {
|
||||||
|
length := ml
|
||||||
|
dst := dst[d:]
|
||||||
|
for len(dst) > 5 {
|
||||||
|
// Offset no more than 2 bytes.
|
||||||
|
if length > 64 {
|
||||||
|
off := 3
|
||||||
|
if offset < 2048 {
|
||||||
|
// emit 8 bytes as tagCopy1, rest as repeats.
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
|
||||||
|
length -= 8
|
||||||
|
off = 2
|
||||||
|
} else {
|
||||||
|
// Emit a length 60 copy, encoded as 3 bytes.
|
||||||
|
// Emit remaining as repeat value (minimum 4 bytes).
|
||||||
|
dst[2] = uint8(offset >> 8)
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = 59<<2 | tagCopy2
|
||||||
|
length -= 60
|
||||||
|
}
|
||||||
|
// Emit remaining as repeats, at least 4 bytes remain.
|
||||||
|
d += off + emitRepeat16(dst[off:], offset, length)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if length >= 12 || offset >= 2048 {
|
||||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||||
|
dst[2] = uint8(offset >> 8)
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(length-1)<<2 | tagCopy2
|
||||||
|
d += 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||||
|
dst[1] = uint8(offset)
|
||||||
|
dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
|
||||||
|
d += 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lastOffset = offset
|
||||||
|
}
|
||||||
|
uncompressed += ml
|
||||||
|
if d > dLimit {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst[:d], uncompressed, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConvertBlockSnappy will convert an LZ4s block and append it
|
||||||
|
// as a Snappy block without block length to dst.
|
||||||
|
// The uncompressed size is returned as well.
|
||||||
|
// dst must have capacity to contain the entire compressed block.
|
||||||
|
func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
|
||||||
|
if len(src) == 0 {
|
||||||
|
return dst, 0, nil
|
||||||
|
}
|
||||||
|
const debug = false
|
||||||
|
const lz4MinMatch = 3
|
||||||
|
|
||||||
|
s, d := 0, len(dst)
|
||||||
|
dst = dst[:cap(dst)]
|
||||||
|
// Use assembly when possible
|
||||||
|
if !debug && hasAmd64Asm {
|
||||||
|
res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src)
|
||||||
|
if res < 0 {
|
||||||
|
const (
|
||||||
|
errCorrupt = -1
|
||||||
|
errDstTooSmall = -2
|
||||||
|
)
|
||||||
|
switch res {
|
||||||
|
case errCorrupt:
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
case errDstTooSmall:
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
default:
|
||||||
|
return nil, 0, fmt.Errorf("unexpected result: %d", res)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if d+sz > len(dst) {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
return dst[:d+sz], res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
dLimit := len(dst) - 10
|
||||||
|
var uncompressed int
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
if s >= len(src) {
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
// Read literal info
|
||||||
|
token := src[s]
|
||||||
|
ll := int(token >> 4)
|
||||||
|
ml := int(lz4MinMatch + (token & 0xf))
|
||||||
|
|
||||||
|
// If upper nibble is 15, literal length is extended
|
||||||
|
if token >= 0xf0 {
|
||||||
|
for {
|
||||||
|
s++
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
val := src[s]
|
||||||
|
ll += int(val)
|
||||||
|
if val != 255 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Skip past token
|
||||||
|
if s+ll >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
s++
|
||||||
|
if ll > 0 {
|
||||||
|
if d+ll > dLimit {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("emit %d literals\n", ll)
|
||||||
|
}
|
||||||
|
d += emitLiteralGo(dst[d:], src[s:s+ll])
|
||||||
|
s += ll
|
||||||
|
uncompressed += ll
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we are done...
|
||||||
|
if ml == lz4MinMatch {
|
||||||
|
if s == len(src) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// 0 bytes.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// 2 byte offset
|
||||||
|
if s >= len(src)-2 {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
offset := binary.LittleEndian.Uint16(src[s:])
|
||||||
|
s += 2
|
||||||
|
if offset == 0 {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
if int(offset) > uncompressed {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
|
||||||
|
if ml == lz4MinMatch+15 {
|
||||||
|
for {
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
val := src[s]
|
||||||
|
s++
|
||||||
|
ml += int(val)
|
||||||
|
if val != 255 {
|
||||||
|
if s >= len(src) {
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
|
||||||
|
}
|
||||||
|
return nil, 0, ErrCorrupt
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
|
||||||
|
}
|
||||||
|
length := ml
|
||||||
|
// d += emitCopyNoRepeat(dst[d:], int(offset), ml)
|
||||||
|
for length > 0 {
|
||||||
|
if d >= dLimit {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset no more than 2 bytes.
|
||||||
|
if length > 64 {
|
||||||
|
// Emit a length 64 copy, encoded as 3 bytes.
|
||||||
|
dst[d+2] = uint8(offset >> 8)
|
||||||
|
dst[d+1] = uint8(offset)
|
||||||
|
dst[d+0] = 63<<2 | tagCopy2
|
||||||
|
length -= 64
|
||||||
|
d += 3
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if length >= 12 || offset >= 2048 || length < 4 {
|
||||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||||
|
dst[d+2] = uint8(offset >> 8)
|
||||||
|
dst[d+1] = uint8(offset)
|
||||||
|
dst[d+0] = uint8(length-1)<<2 | tagCopy2
|
||||||
|
d += 3
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||||
|
dst[d+1] = uint8(offset)
|
||||||
|
dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
|
||||||
|
d += 2
|
||||||
|
break
|
||||||
|
}
|
||||||
|
uncompressed += ml
|
||||||
|
if d > dLimit {
|
||||||
|
return nil, 0, ErrDstTooSmall
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst[:d], uncompressed, nil
|
||||||
|
}
|
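For reference, a hedged sketch of how the converter defined above might be driven. The LZ4s input block and the destination sizing are assumptions: ConvertBlock requires dst to already have enough capacity for the converted output, so the capacity guess below is deliberately generous rather than a documented bound.

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

// convertLZ4s converts a raw LZ4s block (e.g. produced by Intel QAT hardware)
// to an S2 block and reports the uncompressed size.
func convertLZ4s(lz4sBlock []byte) ([]byte, int, error) {
	var conv s2.LZ4sConverter
	// Capacity guess; ConvertBlock appends to dst and needs room for the result.
	dst := make([]byte, 0, 4*len(lz4sBlock)+1024)
	return conv.ConvertBlock(dst, lz4sBlock)
}

func main() {
	// Placeholder input; a real LZ4s block would come from the hardware/driver.
	s2Block, uncompressed, err := convertLZ4s([]byte{})
	fmt.Println(len(s2Block), uncompressed, err)
}
```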
1062 vendor/github.com/klauspost/compress/s2/reader.go generated vendored Normal file
File diff suppressed because it is too large.
1020 vendor/github.com/klauspost/compress/s2/writer.go generated vendored Normal file
File diff suppressed because it is too large.
2 vendor/github.com/klauspost/compress/zstd/README.md generated vendored
@ -304,7 +304,7 @@ import "github.com/klauspost/compress/zstd"
 
 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
 
 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.
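The corrected call above only shows the reader construction. A self-contained, hedged variant of the same pattern is below; the round-trip encode step and the sample text are not part of the README snippet and are added only so the example runs on its own.

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Encoder side, shown only to make the example round-trip.
	enc, _ := zstd.NewWriter(nil)
	compressed := enc.EncodeAll([]byte("hello zstd"), nil)
	enc.Close()

	// Create a reader that caches decompressors.
	// For this operation type we supply a nil Reader.
	dec, _ := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
	defer dec.Close()

	// Decompress a buffer; passing nil dst lets the decoder allocate.
	out, err := dec.DecodeAll(compressed, nil)
	fmt.Println(string(out), err)
}
```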
34 vendor/github.com/klauspost/compress/zstd/bitreader.go generated vendored
@ -17,7 +17,6 @@ import (
 // for aligning the input.
 type bitReader struct {
 	in       []byte
-	off      uint   // next byte to read is at in[off - 1]
 	value    uint64 // Maybe use [16]byte, but shifting is awkward.
 	bitsRead uint8
 }
@ -28,7 +27,6 @@ func (b *bitReader) init(in []byte) error {
 		return errors.New("corrupt stream: too short")
 	}
 	b.in = in
-	b.off = uint(len(in))
 	// The highest bit of the last byte indicates where to start
 	v := in[len(in)-1]
 	if v == 0 {
@ -69,21 +67,19 @@ func (b *bitReader) fillFast() {
 	if b.bitsRead < 32 {
 		return
 	}
-	// 2 bounds checks.
-	v := b.in[b.off-4:]
-	v = v[:4]
+	v := b.in[len(b.in)-4:]
+	b.in = b.in[:len(b.in)-4]
 	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 	b.value = (b.value << 32) | uint64(low)
 	b.bitsRead -= 32
-	b.off -= 4
 }
 
 // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
 func (b *bitReader) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	v := b.in[len(b.in)-8:]
+	b.in = b.in[:len(b.in)-8]
+	b.value = binary.LittleEndian.Uint64(v)
 	b.bitsRead = 0
-	b.off -= 8
 }
 
 // fill() will make sure at least 32 bits are available.
@ -91,25 +87,25 @@ func (b *bitReader) fill() {
 	if b.bitsRead < 32 {
 		return
 	}
-	if b.off >= 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
+	if len(b.in) >= 4 {
+		v := b.in[len(b.in)-4:]
+		b.in = b.in[:len(b.in)-4]
 		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 		b.value = (b.value << 32) | uint64(low)
 		b.bitsRead -= 32
-		b.off -= 4
 		return
 	}
-	for b.off > 0 {
-		b.value = (b.value << 8) | uint64(b.in[b.off-1])
-		b.bitsRead -= 8
-		b.off--
+
+	b.bitsRead -= uint8(8 * len(b.in))
+	for len(b.in) > 0 {
+		b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
+		b.in = b.in[:len(b.in)-1]
 	}
 }
 
 // finished returns true if all bits have been read from the bit stream.
 func (b *bitReader) finished() bool {
-	return b.off == 0 && b.bitsRead >= 64
+	return len(b.in) == 0 && b.bitsRead >= 64
 }
 
 // overread returns true if more bits have been requested than is on the stream.
@ -119,7 +115,7 @@ func (b *bitReader) overread() bool {
 
 // remain returns the number of bits remaining.
 func (b *bitReader) remain() uint {
-	return b.off*8 + 64 - uint(b.bitsRead)
+	return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
 }
 
 // close the bitstream and returns an error if out-of-buffer reads occurred.
3 vendor/github.com/klauspost/compress/zstd/bitwriter.go generated vendored
@ -97,12 +97,11 @@ func (b *bitWriter) flushAlign() {
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }
 
 // reset and continue writing by appending to out.
6 vendor/github.com/klauspost/compress/zstd/blockdec.go generated vendored
@ -9,6 +9,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
+	"hash/crc32"
 	"io"
 	"os"
 	"path/filepath"
@ -442,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
 			}
 		}
 		var err error
+		if debugDecoder {
+			println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
+		}
 		huff, literals, err = huff0.ReadTable(literals, huff)
 		if err != nil {
 			println("reading huffman table:", err)
@ -588,7 +592,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 			}
 			seq.fse.setRLE(symb)
 			if debugDecoder {
-				printf("RLE set to %+v, code: %v", symb, v)
+				printf("RLE set to 0x%x, code: %v", symb, v)
 			}
 		case compModeFSE:
 			println("Reading table for", tableIndex(i))
38 vendor/github.com/klauspost/compress/zstd/blockenc.go generated vendored
@ -361,14 +361,21 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
 	if len(lits) >= 1024 {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
-	} else if len(lits) > 32 {
+	} else if len(lits) > 16 {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
 	} else {
 		err = huff0.ErrIncompressible
 	}
+	if err == nil && len(out)+5 > len(lits) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSizes(len(out), len(lits), single)
+		if len(out)+lh.size() >= len(lits) {
+			err = huff0.ErrIncompressible
+		}
+	}
 	switch err {
 	case huff0.ErrIncompressible:
 		if debugEncoder {
@ -473,7 +480,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
 		return b.encodeLits(b.literals, rawAllLits)
 	}
 	// We want some difference to at least account for the headers.
-	saved := b.size - len(b.literals) - (b.size >> 5)
+	saved := b.size - len(b.literals) - (b.size >> 6)
 	if saved < 16 {
 		if org == nil {
 			return errIncompressible
@ -503,7 +510,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
 	if len(b.literals) >= 1024 && !raw {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-	} else if len(b.literals) > 32 && !raw {
+	} else if len(b.literals) > 16 && !raw {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
@ -511,6 +518,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
 		err = huff0.ErrIncompressible
 	}
 
+	if err == nil && len(out)+5 > len(b.literals) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSize(len(b.literals))
+		szRaw := lh.size()
+		lh.setSizes(len(out), len(b.literals), single)
+		szComp := lh.size()
+		if len(out)+szComp >= len(b.literals)+szRaw {
+			err = huff0.ErrIncompressible
+		}
+	}
 	switch err {
 	case huff0.ErrIncompressible:
 		lh.setType(literalsBlockRaw)
@ -773,16 +791,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
 	ml.flush(mlEnc.actualTableLog)
 	of.flush(ofEnc.actualTableLog)
 	ll.flush(llEnc.actualTableLog)
-	err = wr.close()
-	if err != nil {
-		return err
-	}
+	wr.close()
 	b.output = wr.out
 
+	// Maybe even add a bigger margin.
 	if len(b.output)-3-bhOffset >= b.size {
-		// Maybe even add a bigger margin.
+		// Discard and encode as raw block.
+		b.output = b.encodeRawTo(b.output[:bhOffset], org)
+		b.popOffsets()
 		b.litEnc.Reuse = huff0.ReusePolicyNone
-		return errIncompressible
+		return nil
 	}
 
 	// Size is output minus block header.
4 vendor/github.com/klauspost/compress/zstd/bytebuf.go generated vendored
@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
 func (b *byteBuf) readByte() (byte, error) {
 	bb := *b
 	if len(bb) < 1 {
-		return 0, nil
+		return 0, io.ErrUnexpectedEOF
 	}
 	r := bb[0]
 	*b = bb[1:]
@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
 }
 
 func (r *readerWrapper) readByte() (byte, error) {
-	n2, err := r.r.Read(r.tmp[:1])
+	n2, err := io.ReadFull(r.r, r.tmp[:1])
 	if err != nil {
 		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
7 vendor/github.com/klauspost/compress/zstd/decoder.go generated vendored
@ -455,12 +455,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
 	}
 
 	if len(next.b) > 0 {
-		n, err := d.current.crc.Write(next.b)
-		if err == nil {
-			if n != len(next.b) {
-				d.current.err = io.ErrShortWrite
-			}
-		}
+		d.current.crc.Write(next.b)
 	}
 	if next.err == nil && next.d != nil && next.d.hasCRC {
 		got := uint32(d.current.crc.Sum64())
2 vendor/github.com/klauspost/compress/zstd/decoder_options.go generated vendored
@ -107,7 +107,7 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
 	}
 }
 
-// WithEncoderDictRaw registers a dictionary that may be used by the decoder.
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
 // The slice content can be arbitrary data.
 func WithDecoderDictRaw(id uint32, content []byte) DOption {
 	return func(o *decoderOptions) error {
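The doc-comment fix above concerns the raw-dictionary decoder option. A small hedged sketch of registering such a dictionary follows; the id and content are placeholders, and the matching encoder-side registration is only mentioned, not shown.

```go
package main

import "github.com/klauspost/compress/zstd"

func main() {
	// Placeholder id/content; the same raw dictionary must be registered on the
	// encoder side (WithEncoderDictRaw) for frames that reference it.
	raw := []byte("arbitrary raw dictionary content")
	dec, err := zstd.NewReader(nil, zstd.WithDecoderDictRaw(1, raw))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
}
```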
379 vendor/github.com/klauspost/compress/zstd/dict.go generated vendored
@ -1,10 +1,13 @@
 package zstd
 
 import (
+	"bytes"
 	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
+	"math"
+	"sort"
 
 	"github.com/klauspost/compress/huff0"
 )
@ -14,9 +17,8 @@ type dict struct {
 
 	litEnc              *huff0.Scratch
 	llDec, ofDec, mlDec sequenceDec
-	//llEnc, ofEnc, mlEnc []*fseEncoder
-	offsets [3]int
-	content []byte
+	offsets             [3]int
+	content             []byte
 }
 
 const dictMagic = "\x37\xa4\x30\xec"
@ -159,3 +161,374 @@ func InspectDictionary(b []byte) (interface {
 	d, err := loadDict(b)
 	return d, err
 }

type BuildDictOptions struct {
|
||||||
|
// Dictionary ID.
|
||||||
|
ID uint32
|
||||||
|
|
||||||
|
// Content to use to create dictionary tables.
|
||||||
|
Contents [][]byte
|
||||||
|
|
||||||
|
// History to use for all blocks.
|
||||||
|
History []byte
|
||||||
|
|
||||||
|
// Offsets to use.
|
||||||
|
Offsets [3]int
|
||||||
|
|
||||||
|
// CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
|
||||||
|
// See https://github.com/facebook/zstd/issues/3724
|
||||||
|
CompatV155 bool
|
||||||
|
|
||||||
|
// Use the specified encoder level.
|
||||||
|
// The dictionary will be built using the specified encoder level,
|
||||||
|
// which will reflect speed and make the dictionary tailored for that level.
|
||||||
|
// If not set SpeedBestCompression will be used.
|
||||||
|
Level EncoderLevel
|
||||||
|
|
||||||
|
// DebugOut will write stats and other details here if set.
|
||||||
|
DebugOut io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func BuildDict(o BuildDictOptions) ([]byte, error) {
|
||||||
|
initPredefined()
|
||||||
|
hist := o.History
|
||||||
|
contents := o.Contents
|
||||||
|
debug := o.DebugOut != nil
|
||||||
|
println := func(args ...interface{}) {
|
||||||
|
if o.DebugOut != nil {
|
||||||
|
fmt.Fprintln(o.DebugOut, args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
printf := func(s string, args ...interface{}) {
|
||||||
|
if o.DebugOut != nil {
|
||||||
|
fmt.Fprintf(o.DebugOut, s, args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
print := func(args ...interface{}) {
|
||||||
|
if o.DebugOut != nil {
|
||||||
|
fmt.Fprint(o.DebugOut, args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if int64(len(hist)) > dictMaxLength {
|
||||||
|
return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
|
||||||
|
}
|
||||||
|
if len(hist) < 8 {
|
||||||
|
return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
|
||||||
|
}
|
||||||
|
if len(contents) == 0 {
|
||||||
|
return nil, errors.New("no content provided")
|
||||||
|
}
|
||||||
|
d := dict{
|
||||||
|
id: o.ID,
|
||||||
|
litEnc: nil,
|
||||||
|
llDec: sequenceDec{},
|
||||||
|
ofDec: sequenceDec{},
|
||||||
|
mlDec: sequenceDec{},
|
||||||
|
offsets: o.Offsets,
|
||||||
|
content: hist,
|
||||||
|
}
|
||||||
|
block := blockEnc{lowMem: false}
|
||||||
|
block.init()
|
||||||
|
enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
|
||||||
|
if o.Level != 0 {
|
||||||
|
eOpts := encoderOptions{
|
||||||
|
level: o.Level,
|
||||||
|
blockSize: maxMatchLen,
|
||||||
|
windowSize: maxMatchLen,
|
||||||
|
dict: &d,
|
||||||
|
lowMem: false,
|
||||||
|
}
|
||||||
|
enc = eOpts.encoder()
|
||||||
|
} else {
|
||||||
|
o.Level = SpeedBestCompression
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
remain [256]int
|
||||||
|
ll [256]int
|
||||||
|
ml [256]int
|
||||||
|
of [256]int
|
||||||
|
)
|
||||||
|
addValues := func(dst *[256]int, src []byte) {
|
||||||
|
for _, v := range src {
|
||||||
|
dst[v]++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
addHist := func(dst *[256]int, src *[256]uint32) {
|
||||||
|
for i, v := range src {
|
||||||
|
dst[i] += int(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
seqs := 0
|
||||||
|
nUsed := 0
|
||||||
|
litTotal := 0
|
||||||
|
newOffsets := make(map[uint32]int, 1000)
|
||||||
|
for _, b := range contents {
|
||||||
|
block.reset(nil)
|
||||||
|
if len(b) < 8 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
nUsed++
|
||||||
|
enc.Reset(&d, true)
|
||||||
|
enc.Encode(&block, b)
|
||||||
|
addValues(&remain, block.literals)
|
||||||
|
litTotal += len(block.literals)
|
||||||
|
seqs += len(block.sequences)
|
||||||
|
block.genCodes()
|
||||||
|
addHist(&ll, block.coders.llEnc.Histogram())
|
||||||
|
addHist(&ml, block.coders.mlEnc.Histogram())
|
||||||
|
addHist(&of, block.coders.ofEnc.Histogram())
|
||||||
|
for i, seq := range block.sequences {
|
||||||
|
if i > 3 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
offset := seq.offset
|
||||||
|
if offset == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if offset > 3 {
|
||||||
|
newOffsets[offset-3]++
|
||||||
|
} else {
|
||||||
|
newOffsets[uint32(o.Offsets[offset-1])]++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Find most used offsets.
|
||||||
|
var sortedOffsets []uint32
|
||||||
|
for k := range newOffsets {
|
||||||
|
sortedOffsets = append(sortedOffsets, k)
|
||||||
|
}
|
||||||
|
sort.Slice(sortedOffsets, func(i, j int) bool {
|
||||||
|
a, b := sortedOffsets[i], sortedOffsets[j]
|
||||||
|
if a == b {
|
||||||
|
// Prefer the longer offset
|
||||||
|
return sortedOffsets[i] > sortedOffsets[j]
|
||||||
|
}
|
||||||
|
return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]]
|
||||||
|
})
|
||||||
|
if len(sortedOffsets) > 3 {
|
||||||
|
if debug {
|
||||||
|
print("Offsets:")
|
||||||
|
for i, v := range sortedOffsets {
|
||||||
|
if i > 20 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
printf("[%d: %d],", v, newOffsets[v])
|
||||||
|
}
|
||||||
|
println("")
|
||||||
|
}
|
||||||
|
|
||||||
|
sortedOffsets = sortedOffsets[:3]
|
||||||
|
}
|
||||||
|
for i, v := range sortedOffsets {
|
||||||
|
o.Offsets[i] = int(v)
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
println("New repeat offsets", o.Offsets)
|
||||||
|
}
|
||||||
|
|
||||||
|
if nUsed == 0 || seqs == 0 {
|
||||||
|
return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
|
||||||
|
}
|
||||||
|
if seqs/nUsed < 512 {
|
||||||
|
// Use 512 as minimum.
|
||||||
|
nUsed = seqs / 512
|
||||||
|
}
|
||||||
|
copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
|
||||||
|
hist := dst.Histogram()
|
||||||
|
var maxSym uint8
|
||||||
|
var maxCount int
|
||||||
|
var fakeLength int
|
||||||
|
for i, v := range src {
|
||||||
|
if v > 0 {
|
||||||
|
v = v / nUsed
|
||||||
|
if v == 0 {
|
||||||
|
v = 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v > maxCount {
|
||||||
|
maxCount = v
|
||||||
|
}
|
||||||
|
if v != 0 {
|
||||||
|
maxSym = uint8(i)
|
||||||
|
}
|
||||||
|
fakeLength += v
|
||||||
|
hist[i] = uint32(v)
|
||||||
|
}
|
||||||
|
dst.HistogramFinished(maxSym, maxCount)
|
||||||
|
dst.reUsed = false
|
||||||
|
dst.useRLE = false
|
||||||
|
err := dst.normalizeCount(fakeLength)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
|
||||||
|
}
|
||||||
|
return dst.writeCount(nil)
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
print("Literal lengths: ")
|
||||||
|
}
|
||||||
|
llTable, err := copyHist(block.coders.llEnc, &ll)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
print("Match lengths: ")
|
||||||
|
}
|
||||||
|
mlTable, err := copyHist(block.coders.mlEnc, &ml)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
print("Offsets: ")
|
||||||
|
}
|
||||||
|
ofTable, err := copyHist(block.coders.ofEnc, &of)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Literal table
|
||||||
|
avgSize := litTotal
|
||||||
|
if avgSize > huff0.BlockSizeMax/2 {
|
||||||
|
avgSize = huff0.BlockSizeMax / 2
|
||||||
|
}
|
||||||
|
huffBuff := make([]byte, 0, avgSize)
|
||||||
|
// Target size
|
||||||
|
div := litTotal / avgSize
|
||||||
|
if div < 1 {
|
||||||
|
div = 1
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
println("Huffman weights:")
|
||||||
|
}
|
||||||
|
for i, n := range remain[:] {
|
||||||
|
if n > 0 {
|
||||||
|
n = n / div
|
||||||
|
// Allow all entries to be represented.
|
||||||
|
if n == 0 {
|
||||||
|
n = 1
|
||||||
|
}
|
||||||
|
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
|
||||||
|
if debug {
|
||||||
|
printf("[%d: %d], ", i, n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if o.CompatV155 && remain[255]/div == 0 {
|
||||||
|
huffBuff = append(huffBuff, 255)
|
||||||
|
}
|
||||||
|
scratch := &huff0.Scratch{TableLog: 11}
|
||||||
|
for tries := 0; tries < 255; tries++ {
|
||||||
|
scratch = &huff0.Scratch{TableLog: 11}
|
||||||
|
_, _, err = huff0.Compress1X(huffBuff, scratch)
|
||||||
|
if err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if debug {
|
||||||
|
printf("Try %d: Huffman error: %v\n", tries+1, err)
|
||||||
|
}
|
||||||
|
huffBuff = huffBuff[:0]
|
||||||
|
if tries == 250 {
|
||||||
|
if debug {
|
||||||
|
println("Huffman: Bailing out with predefined table")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bail out.... Just generate something
|
||||||
|
huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
|
||||||
|
for i := 0; i < 128; i++ {
|
||||||
|
huffBuff = append(huffBuff, byte(i))
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if errors.Is(err, huff0.ErrIncompressible) {
|
||||||
|
// Try truncating least common.
|
||||||
|
for i, n := range remain[:] {
|
||||||
|
if n > 0 {
|
||||||
|
n = n / (div * (i + 1))
|
||||||
|
if n > 0 {
|
||||||
|
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
|
||||||
|
huffBuff = append(huffBuff, 255)
|
||||||
|
}
|
||||||
|
if len(huffBuff) == 0 {
|
||||||
|
huffBuff = append(huffBuff, 0, 255)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if errors.Is(err, huff0.ErrUseRLE) {
|
||||||
|
for i, n := range remain[:] {
|
||||||
|
n = n / (div * (i + 1))
|
||||||
|
// Allow all entries to be represented.
|
||||||
|
if n == 0 {
|
||||||
|
n = 1
|
||||||
|
}
|
||||||
|
huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var out bytes.Buffer
|
||||||
|
out.Write([]byte(dictMagic))
|
||||||
|
out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
|
||||||
|
out.Write(scratch.OutTable)
|
||||||
|
if debug {
|
||||||
|
println("huff table:", len(scratch.OutTable), "bytes")
|
||||||
|
println("of table:", len(ofTable), "bytes")
|
||||||
|
println("ml table:", len(mlTable), "bytes")
|
||||||
|
println("ll table:", len(llTable), "bytes")
|
||||||
|
}
|
||||||
|
out.Write(ofTable)
|
||||||
|
out.Write(mlTable)
|
||||||
|
out.Write(llTable)
|
||||||
|
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
|
||||||
|
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
|
||||||
|
out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
|
||||||
|
out.Write(hist)
|
||||||
|
if debug {
|
||||||
|
_, err := loadDict(out.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
i, err := InspectDictionary(out.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
println("ID:", i.ID())
|
||||||
|
println("Content size:", i.ContentSize())
|
||||||
|
println("Encoder:", i.LitEncoder() != nil)
|
||||||
|
println("Offsets:", i.Offsets())
|
||||||
|
var totalSize int
|
||||||
|
for _, b := range contents {
|
||||||
|
totalSize += len(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
encWith := func(opts ...EOption) int {
|
||||||
|
enc, err := NewWriter(nil, opts...)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
defer enc.Close()
|
||||||
|
var dst []byte
|
||||||
|
var totalSize int
|
||||||
|
for _, b := range contents {
|
||||||
|
dst = enc.EncodeAll(b, dst[:0])
|
||||||
|
totalSize += len(dst)
|
||||||
|
}
|
||||||
|
return totalSize
|
||||||
|
}
|
||||||
|
plain := encWith(WithEncoderLevel(o.Level))
|
||||||
|
withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
|
||||||
|
println("Input size:", totalSize)
|
||||||
|
println("Plain Compressed:", plain)
|
||||||
|
println("Dict Compressed:", withDict)
|
||||||
|
println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
|
||||||
|
}
|
||||||
|
return out.Bytes(), nil
|
||||||
|
}
|
||||||
|
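The bulk of this file is the new `BuildDict` API shown above. A sketch of how it could be driven (the sample data is invented; real use needs a larger, representative corpus or `BuildDict` may report that no sequences were found):

```go
package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Representative samples of the payloads you intend to compress.
	samples := [][]byte{
		[]byte(`{"event":"join","channel":"#general","user":"alice"}`),
		[]byte(`{"event":"leave","channel":"#general","user":"bob"}`),
		[]byte(`{"event":"message","channel":"#random","user":"carol"}`),
	}

	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1,
		Contents: samples,
		// History seeds the shared window; must be at least 8 bytes.
		History: []byte(`{"event":"message","channel":"#general","user":"..."}`),
		// Start from zstd's standard initial repeat offsets.
		Offsets:  [3]int{1, 4, 8},
		Level:    zstd.SpeedBestCompression,
		DebugOut: os.Stderr, // optional: prints table and size statistics
	})
	if err != nil {
		log.Fatal(err)
	}

	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()
	compressed := enc.EncodeAll(samples[0], nil)
	_ = compressed
}
```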
1  vendor/github.com/klauspost/compress/zstd/enc_base.go  (generated, vendored)
@@ -144,6 +144,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
 	} else {
 		e.crc.Reset()
 	}
+	e.blk.dictLitEnc = nil
 	if d != nil {
 		low := e.lowMem
 		if singleBlock {
266  vendor/github.com/klauspost/compress/zstd/enc_best.go  (generated, vendored)
@@ -32,10 +32,9 @@ type match struct {
 	length int32
 	rep    int32
 	est    int32
-	_      [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
 }
 
-const highScore = 25000
+const highScore = maxMatchLen * 8
 
 // estBits will estimate output bits from predefined tables.
 func (m *match) estBits(bitsPerByte int32) {
@@ -160,7 +159,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
 
 	// nextEmit is where in src the next emitLiteral should start from.
 	nextEmit := s
-	cv := load6432(src, s)
 
 	// Relative offsets
 	offset1 := int32(blk.recentOffsets[0])
@@ -174,7 +172,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
 		blk.literals = append(blk.literals, src[nextEmit:until]...)
 		s.litLen = uint32(until - nextEmit)
 	}
-	_ = addLiterals
 
 	if debugEncoder {
 		println("recent offsets:", blk.recentOffsets)
@@ -189,53 +186,96 @@ encodeLoop:
 			panic("offset0 was 0")
 		}
 
-		bestOf := func(a, b *match) *match {
-			if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
-				return a
-			}
-			return b
-		}
-		const goodEnough = 100
+		const goodEnough = 250
+
+		cv := load6432(src, s)
 
 		nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
 		nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
 		candidateL := e.longTable[nextHashL]
 		candidateS := e.table[nextHashS]
 
-		matchAt := func(offset int32, s int32, first uint32, rep int32) match {
+		// Set m to a match at offset if it looks like that will improve compression.
+		improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
 			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
-				return match{s: s, est: highScore}
+				return
 			}
 			if debugAsserts {
+				if offset <= 0 {
+					panic(offset)
+				}
 				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
 					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
 				}
 			}
-			m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
-			m.estBits(bitsPerByte)
-			return m
+			// Try to quick reject if we already have a long match.
+			if m.length > 16 {
+				left := len(src) - int(m.s+m.length)
+				// If we are too close to the end, keep as is.
+				if left <= 0 {
+					return
+				}
+				checkLen := m.length - (s - m.s) - 8
+				if left > 2 && checkLen > 4 {
+					// Check 4 bytes, 4 bytes from the end of the current match.
+					a := load3232(src, offset+checkLen)
+					b := load3232(src, s+checkLen)
+					if a != b {
+						return
+					}
+				}
+			}
+			l := 4 + e.matchlen(s+4, offset+4, src)
+			if rep < 0 {
+				// Extend candidate match backwards as far as possible.
+				tMin := s - e.maxMatchOff
+				if tMin < 0 {
+					tMin = 0
+				}
+				for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
+					s--
+					offset--
+					l++
+				}
+			}
+
+			cand := match{offset: offset, s: s, length: l, rep: rep}
+			cand.estBits(bitsPerByte)
+			if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
+				*m = cand
+			}
 		}
 
-		m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
-		m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
-		m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
-		m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
-		best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
+		best := match{s: s, est: highScore}
+		improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
 
 		if canRepeat && best.length < goodEnough {
-			cv32 := uint32(cv >> 8)
-			spp := s + 1
-			m1 := matchAt(spp-offset1, spp, cv32, 1)
-			m2 := matchAt(spp-offset2, spp, cv32, 2)
-			m3 := matchAt(spp-offset3, spp, cv32, 3)
-			best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
-			if best.length > 0 {
-				cv32 = uint32(cv >> 24)
-				spp += 2
-				m1 := matchAt(spp-offset1, spp, cv32, 1)
-				m2 := matchAt(spp-offset2, spp, cv32, 2)
-				m3 := matchAt(spp-offset3, spp, cv32, 3)
-				best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
+			if s == nextEmit {
+				// Check repeats straight after a match.
+				improve(&best, s-offset2, s, uint32(cv), 1|4)
+				improve(&best, s-offset3, s, uint32(cv), 2|4)
+				if offset1 > 1 {
+					improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
+				}
+			}
+
+			// If either no match or a non-repeat match, check at + 1
+			if best.rep <= 0 {
+				cv32 := uint32(cv >> 8)
+				spp := s + 1
+				improve(&best, spp-offset1, spp, cv32, 1)
+				improve(&best, spp-offset2, spp, cv32, 2)
+				improve(&best, spp-offset3, spp, cv32, 3)
+				if best.rep < 0 {
+					cv32 = uint32(cv >> 24)
+					spp += 2
+					improve(&best, spp-offset1, spp, cv32, 1)
+					improve(&best, spp-offset2, spp, cv32, 2)
+					improve(&best, spp-offset3, spp, cv32, 3)
+				}
 			}
 		}
 		// Load next and check...
@@ -250,47 +290,45 @@ encodeLoop:
 			if s >= sLimit {
 				break encodeLoop
 			}
-			cv = load6432(src, s)
 			continue
 		}
 
-		s++
 		candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
-		cv = load6432(src, s)
-		cv2 := load6432(src, s+1)
+		cv = load6432(src, s+1)
+		cv2 := load6432(src, s+2)
 		candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
 		candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
 
 		// Short at s+1
-		m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
 		// Long at s+1, s+2
-		m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
-		m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
-		m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
-		m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
-		best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
+		improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
+		improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
+		improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
+		improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
+
 		if false {
 			// Short at s+3.
 			// Too often worse...
-			m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
-			best = bestOf(best, &m)
+			improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
 		}
-		// See if we can find a better match by checking where the current best ends.
-		// Use that offset to see if we can find a better full match.
-		if sAt := best.s + best.length; sAt < sLimit {
-			nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
-			candidateEnd := e.longTable[nextHashL]
-			// Start check at a fixed offset to allow for a few mismatches.
-			// For this compression level 2 yields the best results.
-			const skipBeginning = 2
-			if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
-				m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
-				bestEnd := bestOf(best, &m)
-				if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
-					m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
-					bestEnd = bestOf(bestEnd, &m)
+
+		// Start check at a fixed offset to allow for a few mismatches.
+		// For this compression level 2 yields the best results.
+		// We cannot do this if we have already indexed this position.
+		const skipBeginning = 2
+		if best.s > s-skipBeginning {
+			// See if we can find a better match by checking where the current best ends.
+			// Use that offset to see if we can find a better full match.
+			if sAt := best.s + best.length; sAt < sLimit {
+				nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
+				candidateEnd := e.longTable[nextHashL]

+				if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
+					improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+					if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
+						improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+					}
 				}
-				best = bestEnd
 			}
 		}
 	}
@@ -303,51 +341,34 @@ encodeLoop:
 
 		// We have a match, we can store the forward value
 		if best.rep > 0 {
-			s = best.s
 			var seq seq
 			seq.matchLen = uint32(best.length - zstdMinMatch)
-
-			// We might be able to match backwards.
-			// Extend as long as we can.
-			start := best.s
-			// We end the search early, so we don't risk 0 literals
-			// and have to do special offset treatment.
-			startLimit := nextEmit + 1
-
-			tMin := s - e.maxMatchOff
-			if tMin < 0 {
-				tMin = 0
-			}
-			repIndex := best.offset
-			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
-				repIndex--
-				start--
-				seq.matchLen++
+			if debugAsserts && s <= nextEmit {
+				panic("s <= nextEmit")
 			}
-			addLiterals(&seq, start)
+			addLiterals(&seq, best.s)
 
-			// rep 0
-			seq.offset = uint32(best.rep)
+			// Repeat. If bit 4 is set, this is a non-lit repeat.
+			seq.offset = uint32(best.rep & 3)
 			if debugSequences {
 				println("repeat sequence", seq, "next s:", s)
 			}
 			blk.sequences = append(blk.sequences, seq)
 
-			// Index match start+1 (long) -> s - 1
-			index0 := s
+			// Index old s + 1 -> s - 1
+			index0 := s + 1
 			s = best.s + best.length
 
 			nextEmit = s
 			if s >= sLimit {
 				if debugEncoder {
 					println("repeat ended", s, best.length)
 
 				}
 				break encodeLoop
 			}
 			// Index skipped...
 			off := index0 + e.cur
-			for index0 < s-1 {
+			for index0 < s {
 				cv0 := load6432(src, index0)
 				h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 				h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -357,17 +378,19 @@ encodeLoop:
 				index0++
 			}
 			switch best.rep {
-			case 2:
+			case 2, 4 | 1:
 				offset1, offset2 = offset2, offset1
-			case 3:
+			case 3, 4 | 2:
 				offset1, offset2, offset3 = offset3, offset1, offset2
+			case 4 | 3:
+				offset1, offset2, offset3 = offset1-1, offset1, offset2
 			}
-			cv = load6432(src, s)
 			continue
 		}
 
 		// A 4-byte match has been found. Update recent offsets.
 		// We'll later see if more than 4 bytes.
+		index0 := s + 1
 		s = best.s
 		t := best.offset
 		offset1, offset2, offset3 = s-t, offset1, offset2
@@ -380,22 +403,9 @@ encodeLoop:
 			panic("invalid offset")
 		}
 
-		// Extend the n-byte match as long as possible.
-		l := best.length
-
-		// Extend backwards
-		tMin := s - e.maxMatchOff
-		if tMin < 0 {
-			tMin = 0
-		}
-		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
-			s--
-			t--
-			l++
-		}
-
 		// Write our sequence
 		var seq seq
+		l := best.length
 		seq.litLen = uint32(s - nextEmit)
 		seq.matchLen = uint32(l - zstdMinMatch)
 		if seq.litLen > 0 {
@@ -412,10 +422,8 @@ encodeLoop:
 			break encodeLoop
 		}
 
-		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
-		// every entry
-		for index0 < s-1 {
+		// Index old s + 1 -> s - 1
+		for index0 < s {
 			cv0 := load6432(src, index0)
 			h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 			h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -424,50 +432,6 @@ encodeLoop:
 			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
 			index0++
 		}
-
-		cv = load6432(src, s)
-		if !canRepeat {
-			continue
-		}
-
-		// Check offset 2
-		for {
-			o2 := s - offset2
-			if load3232(src, o2) != uint32(cv) {
-				// Do regular search
-				break
-			}
-
-			// Store this, since we have it.
-			nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
-			nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
-
-			// We have at least 4 byte match.
-			// No need to check backwards. We come straight from a match
-			l := 4 + e.matchlen(s+4, o2+4, src)
-
-			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
-			e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
-			seq.matchLen = uint32(l) - zstdMinMatch
-			seq.litLen = 0
-
-			// Since litlen is always 0, this is offset 1.
-			seq.offset = 1
-			s += l
-			nextEmit = s
-			if debugSequences {
-				println("sequence", seq, "next s:", s)
-			}
-			blk.sequences = append(blk.sequences, seq)
-
-			// Swap offset 1 and 2.
-			offset1, offset2 = offset2, offset1
-			if s >= sLimit {
-				// Finished
-				break encodeLoop
-			}
-			cv = load6432(src, s)
-		}
 	}
 
 	if int(nextEmit) < len(src) {
2  vendor/github.com/klauspost/compress/zstd/enc_dfast.go  (generated, vendored)
@@ -1084,7 +1084,7 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
 			}
 		}
 		e.lastDictID = d.id
-		e.allDirty = true
+		allDirty = true
 	}
 	// Reset table to initial state
 	e.cur = e.maxMatchOff
17  vendor/github.com/klauspost/compress/zstd/enc_fast.go  (generated, vendored)
@@ -133,8 +133,7 @@ encodeLoop:
 		if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
 			// Consider history as well.
 			var seq seq
-			var length int32
-			length = 4 + e.matchlen(s+6, repIndex+4, src)
+			length := 4 + e.matchlen(s+6, repIndex+4, src)
 			seq.matchLen = uint32(length - zstdMinMatch)
 
 			// We might be able to match backwards.
@@ -645,8 +644,7 @@ encodeLoop:
 		if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
 			// Consider history as well.
 			var seq seq
-			var length int32
-			length = 4 + e.matchlen(s+6, repIndex+4, src)
+			length := 4 + e.matchlen(s+6, repIndex+4, src)
 
 			seq.matchLen = uint32(length - zstdMinMatch)
 
@@ -831,13 +829,12 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 	}
 	if true {
 		end := e.maxMatchOff + int32(len(d.content)) - 8
-		for i := e.maxMatchOff; i < end; i += 3 {
+		for i := e.maxMatchOff; i < end; i += 2 {
 			const hashLog = tableBits
 
 			cv := load6432(d.content, i-e.maxMatchOff)
-			nextHash := hashLen(cv, hashLog, tableFastHashLen)      // 0 -> 5
-			nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen)  // 1 -> 6
-			nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
+			nextHash := hashLen(cv, hashLog, tableFastHashLen)     // 0 -> 6
+			nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7
 			e.dictTable[nextHash] = tableEntry{
 				val:    uint32(cv),
 				offset: i,
@@ -846,10 +843,6 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
 				val:    uint32(cv >> 8),
 				offset: i + 1,
 			}
-			e.dictTable[nextHash2] = tableEntry{
-				val:    uint32(cv >> 16),
-				offset: i + 2,
-			}
 		}
 	}
 	e.lastDictID = d.id
91  vendor/github.com/klauspost/compress/zstd/encoder.go  (generated, vendored)
@@ -227,10 +227,7 @@ func (e *Encoder) nextBlock(final bool) error {
 			DictID:        e.o.dict.ID(),
 		}
 
-		dst, err := fh.appendTo(tmp[:0])
-		if err != nil {
-			return err
-		}
+		dst := fh.appendTo(tmp[:0])
 		s.headerWritten = true
 		s.wWg.Wait()
 		var n2 int
@@ -277,23 +274,9 @@ func (e *Encoder) nextBlock(final bool) error {
 			s.eofWritten = true
 		}
 
-		err := errIncompressible
-		// If we got the exact same number of literals as input,
-		// assume the literals cannot be compressed.
-		if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
-			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
-		}
-		switch err {
-		case errIncompressible:
-			if debugEncoder {
-				println("Storing incompressible block as raw")
-			}
-			blk.encodeRaw(src)
-			// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
-		case nil:
-		default:
-			s.err = err
-			return err
+		s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		if s.err != nil {
+			return s.err
 		}
 		_, s.err = s.w.Write(blk.output)
 		s.nWritten += int64(len(blk.output))
@@ -343,22 +326,8 @@ func (e *Encoder) nextBlock(final bool) error {
 			}
 			s.wWg.Done()
 		}()
-		err := errIncompressible
-		// If we got the exact same number of literals as input,
-		// assume the literals cannot be compressed.
-		if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
-			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
-		}
-		switch err {
-		case errIncompressible:
-			if debugEncoder {
-				println("Storing incompressible block as raw")
-			}
-			blk.encodeRaw(src)
-			// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
-		case nil:
-		default:
-			s.writeErr = err
+		s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		if s.writeErr != nil {
 			return
 		}
 		_, s.writeErr = s.w.Write(blk.output)
@@ -511,7 +480,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 			Checksum:    false,
 			DictID:      0,
 		}
-		dst, _ = fh.appendTo(dst)
+		dst = fh.appendTo(dst)
 
 		// Write raw block as last one only.
 		var blk blockHeader
@@ -546,10 +515,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 	if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
 		dst = make([]byte, 0, len(src))
 	}
-	dst, err := fh.appendTo(dst)
-	if err != nil {
-		panic(err)
-	}
+	dst = fh.appendTo(dst)
 
 	// If we can do everything in one block, prefer that.
 	if len(src) <= e.o.blockSize {
@@ -568,25 +534,15 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 
 		// If we got the exact same number of literals as input,
 		// assume the literals cannot be compressed.
-		err := errIncompressible
 		oldout := blk.output
-		if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
-			// Output directly to dst
-			blk.output = dst
-			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
-		}
+		// Output directly to dst
+		blk.output = dst
 
-		switch err {
-		case errIncompressible:
-			if debugEncoder {
-				println("Storing incompressible block as raw")
-			}
-			dst = blk.encodeRawTo(dst, src)
-		case nil:
-			dst = blk.output
-		default:
+		err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		if err != nil {
 			panic(err)
 		}
+		dst = blk.output
 		blk.output = oldout
 	} else {
 		enc.Reset(e.o.dict, false)
@@ -605,25 +561,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 			if len(src) == 0 {
 				blk.last = true
 			}
-			err := errIncompressible
-			// If we got the exact same number of literals as input,
-			// assume the literals cannot be compressed.
-			if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
-				err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
-			}
-
-			switch err {
-			case errIncompressible:
-				if debugEncoder {
-					println("Storing incompressible block as raw")
-				}
-				dst = blk.encodeRawTo(dst, todo)
-				blk.popOffsets()
-			case nil:
-				dst = append(dst, blk.output...)
-			default:
+			err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+			if err != nil {
 				panic(err)
 			}
+			dst = append(dst, blk.output...)
 			blk.reset(nil)
 		}
 	}
@@ -633,6 +575,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
 	// Add padding with content from crypto/rand.Reader
 	if e.o.pad > 0 {
 		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+		var err error
 		dst, err = skippableFrame(dst, add, rand.Reader)
 		if err != nil {
 			panic(err)
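The `errIncompressible` special-casing disappears from `nextBlock` and `EncodeAll`; the raw-block fallback now lives inside `blk.encode` (see the blockenc.go hunk at the top of this diff). A sketch exercising that path with effectively incompressible input (not part of the diff):

```go
package main

import (
	"bytes"
	"crypto/rand"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	dec, err := zstd.NewReader(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	// Random data is effectively incompressible and ends up stored as raw blocks.
	incompressible := make([]byte, 1<<16)
	if _, err := rand.Read(incompressible); err != nil {
		log.Fatal(err)
	}

	compressed := enc.EncodeAll(incompressible, nil)
	restored, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(restored, incompressible) {
		log.Fatal("round trip mismatch")
	}
	log.Printf("in=%d out=%d", len(incompressible), len(compressed))
}
```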
6  vendor/github.com/klauspost/compress/zstd/encoder_options.go  (generated, vendored)
@@ -39,7 +39,7 @@ func (o *encoderOptions) setDefault() {
 		blockSize:     maxCompressedBlockSize,
 		windowSize:    8 << 20,
 		level:         SpeedDefault,
-		allLitEntropy: true,
+		allLitEntropy: false,
 		lowMem:        false,
 	}
 }
@@ -129,7 +129,7 @@ func WithEncoderPadding(n int) EOption {
 		}
 		// No need to waste our time.
 		if n == 1 {
-			o.pad = 0
+			n = 0
 		}
 		if n > 1<<30 {
 			return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ")
@@ -238,7 +238,7 @@ func WithEncoderLevel(l EncoderLevel) EOption {
 			}
 		}
 		if !o.customALEntropy {
-			o.allLitEntropy = l > SpeedFastest
+			o.allLitEntropy = l > SpeedDefault
 		}
 
 		return nil
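Two behavioural tweaks above: `allLitEntropy` now defaults off and only follows levels above `SpeedDefault`, and `WithEncoderPadding(1)` now simply disables padding. A sketch of these options as seen from the public API (the level and padding values below are arbitrary choices, not part of the change):

```go
package main

import (
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil,
		// Levels above SpeedDefault now enable extra literal entropy coding by default.
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
		// Pad output to a multiple of 4 KiB; a value of 1 now just turns padding off.
		zstd.WithEncoderPadding(4096),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	frame := enc.EncodeAll([]byte("example payload"), nil)
	log.Printf("padded frame size: %d bytes", len(frame))
}
```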
43  vendor/github.com/klauspost/compress/zstd/framedec.go  (generated, vendored)
@@ -73,20 +73,20 @@ func (d *frameDec) reset(br byteBuffer) error {
 		switch err {
 		case io.EOF, io.ErrUnexpectedEOF:
 			return io.EOF
-		default:
-			return err
 		case nil:
 			signature[0] = b[0]
+		default:
+			return err
 		}
 		// Read the rest, don't allow io.ErrUnexpectedEOF
 		b, err = br.readSmall(3)
 		switch err {
 		case io.EOF:
 			return io.EOF
-		default:
-			return err
 		case nil:
 			copy(signature[1:], b)
+		default:
+			return err
 		}
 
 		if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
@@ -293,13 +293,9 @@ func (d *frameDec) next(block *blockDec) error {
 	return nil
 }
 
-// checkCRC will check the checksum if the frame has one.
+// checkCRC will check the checksum, assuming the frame has one.
 // Will return ErrCRCMismatch if crc check failed, otherwise nil.
 func (d *frameDec) checkCRC() error {
-	if !d.HasCheckSum {
-		return nil
-	}
-
 	// We can overwrite upper tmp now
 	buf, err := d.rawInput.readSmall(4)
 	if err != nil {
@@ -307,10 +303,6 @@ func (d *frameDec) checkCRC() error {
 		return err
 	}
 
-	if d.o.ignoreChecksum {
-		return nil
-	}
-
 	want := binary.LittleEndian.Uint32(buf[:4])
 	got := uint32(d.crc.Sum64())
 
@@ -326,17 +318,13 @@ func (d *frameDec) checkCRC() error {
 	return nil
 }
 
-// consumeCRC reads the checksum data if the frame has one.
+// consumeCRC skips over the checksum, assuming the frame has one.
 func (d *frameDec) consumeCRC() error {
-	if d.HasCheckSum {
-		_, err := d.rawInput.readSmall(4)
-		if err != nil {
-			println("CRC missing?", err)
-			return err
-		}
+	_, err := d.rawInput.readSmall(4)
+	if err != nil {
+		println("CRC missing?", err)
 	}
-	return nil
+	return err
 }
 
 // runDecoder will run the decoder for the remainder of the frame.
@@ -415,15 +403,8 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 			if d.o.ignoreChecksum {
 				err = d.consumeCRC()
 			} else {
-				var n int
-				n, err = d.crc.Write(dst[crcStart:])
-				if err == nil {
-					if n != len(dst)-crcStart {
-						err = io.ErrShortWrite
-					} else {
-						err = d.checkCRC()
-					}
-				}
+				d.crc.Write(dst[crcStart:])
+				err = d.checkCRC()
 			}
 		}
 	}
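`checkCRC` now assumes a checksum is present and `consumeCRC` simply skips it; the `HasCheckSum`/`ignoreChecksum` guards live in the callers. A sketch of a checksummed frame going through these paths (not part of the diff):

```go
package main

import (
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Write a frame with a content checksum.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderCRC(true))
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()
	frame := enc.EncodeAll([]byte("checksummed payload"), nil)

	// Decoding verifies the checksum; a corrupted frame would surface
	// zstd.ErrCRCMismatch from DecodeAll.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()
	if _, err := dec.DecodeAll(frame, nil); err != nil {
		log.Fatal(err)
	}
}
```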
4  vendor/github.com/klauspost/compress/zstd/frameenc.go  (generated, vendored)
@@ -22,7 +22,7 @@ type frameHeader struct {
 
 const maxHeaderSize = 14
 
-func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+func (f frameHeader) appendTo(dst []byte) []byte {
 	dst = append(dst, frameMagic...)
 	var fhd uint8
 	if f.Checksum {
@@ -88,7 +88,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
 	default:
 		panic("invalid fcs")
 	}
-	return dst, nil
+	return dst
 }
 
 const skippableFrameHeader = 4 + 4
16  vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go  (generated, vendored, new file)
@@ -0,0 +1,16 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//
+//	len(a) <= len(b) and len(a) > 0
+//
+//go:noescape
+func matchLen(a []byte, b []byte) int
68  vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s  (generated, vendored, new file)
@@ -0,0 +1,68 @@
+// Copied from S2 implementation.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+#include "textflag.h"
+
+// func matchLen(a []byte, b []byte) int
+// Requires: BMI
+TEXT ·matchLen(SB), NOSPLIT, $0-56
+	MOVQ a_base+0(FP), AX
+	MOVQ b_base+24(FP), CX
+	MOVQ a_len+8(FP), DX
+
+	// matchLen
+	XORL SI, SI
+	CMPL DX, $0x08
+	JB   matchlen_match4_standalone
+
+matchlen_loopback_standalone:
+	MOVQ  (AX)(SI*1), BX
+	XORQ  (CX)(SI*1), BX
+	TESTQ BX, BX
+	JZ    matchlen_loop_standalone
+
+#ifdef GOAMD64_v3
+	TZCNTQ BX, BX
+#else
+	BSFQ BX, BX
+#endif
+	SARQ $0x03, BX
+	LEAL (SI)(BX*1), SI
+	JMP  gen_match_len_end
+
+matchlen_loop_standalone:
+	LEAL -8(DX), DX
+	LEAL 8(SI), SI
+	CMPL DX, $0x08
+	JAE  matchlen_loopback_standalone
+
+matchlen_match4_standalone:
+	CMPL DX, $0x04
+	JB   matchlen_match2_standalone
+	MOVL (AX)(SI*1), BX
+	CMPL (CX)(SI*1), BX
+	JNE  matchlen_match2_standalone
+	LEAL -4(DX), DX
+	LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+	CMPL DX, $0x02
+	JB   matchlen_match1_standalone
+	MOVW (AX)(SI*1), BX
+	CMPW (CX)(SI*1), BX
+	JNE  matchlen_match1_standalone
+	LEAL -2(DX), DX
+	LEAL 2(SI), SI
+
+matchlen_match1_standalone:
+	CMPL DX, $0x01
+	JB   gen_match_len_end
+	MOVB (AX)(SI*1), BL
+	CMPB (CX)(SI*1), BL
+	JNE  gen_match_len_end
+	INCL SI
+
+gen_match_len_end:
+	MOVQ SI, ret+48(FP)
+	RET
33  vendor/github.com/klauspost/compress/zstd/matchlen_generic.go  (generated, vendored, new file)
@@ -0,0 +1,33 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"encoding/binary"
+	"math/bits"
+)
+
+// matchLen returns the maximum common prefix length of a and b.
+// a must be the shortest of the two.
+func matchLen(a, b []byte) (n int) {
+	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
+		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+		if diff != 0 {
+			return n + bits.TrailingZeros64(diff)>>3
+		}
+		n += 8
+	}
+
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+
+}
28  vendor/github.com/klauspost/compress/zstd/seqdec.go  (generated, vendored)
@@ -236,13 +236,16 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 		maxBlockSize = s.windowSize
 	}
 
+	if debugDecoder {
+		println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
+	}
 	for i := seqs - 1; i >= 0; i-- {
 		if br.overread() {
-			printf("reading sequence %d, exceeded available data\n", seqs-i)
+			printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
 			return io.ErrUnexpectedEOF
 		}
 		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
 
@@ -314,9 +317,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 		}
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
-			if size-startSize == 424242 {
-				panic("here")
-			}
 			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		if size > cap(out) {
@@ -427,8 +427,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 		}
 	}
 
-	// Check if space for literals
-	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
+	if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
 		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 
@@ -453,18 +452,13 @@ func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol)
 
 	// extra bits are stored in reverse order.
 	br.fill()
-	if s.maxBits <= 32 {
-		mo += br.getBits(moB)
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
-	} else {
-		mo += br.getBits(moB)
+	mo += br.getBits(moB)
+	if s.maxBits > 32 {
 		br.fill()
-		// matchlength+literal length, max 32 bits
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
-
 	}
+	// matchlength+literal length, max 32 bits
+	ml += br.getBits(mlB)
+	ll += br.getBits(llB)
 	mo = s.adjustOffset(mo, ll, moB)
 	return
 }
17 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go generated vendored

@@ -5,6 +5,7 @@ package zstd

 import (
 	"fmt"
+	"io"

 	"github.com/klauspost/compress/internal/cpuinfo"
 )

@@ -134,6 +135,9 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 			return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
 				ctx.ll, ctx.litRemain+ctx.ll)

+		case errorOverread:
+			return true, io.ErrUnexpectedEOF
+
 		case errorNotEnoughSpace:
 			size := ctx.outPosition + ctx.ll + ctx.ml
 			if debugDecoder {

@@ -148,7 +152,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
 		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-
 	}
 	err := br.close()
 	if err != nil {

@@ -203,6 +206,9 @@ const errorNotEnoughLiterals = 4
 // error reported when capacity of `out` is too small
 const errorNotEnoughSpace = 5

+// error reported when bits are overread.
+const errorOverread = 6
+
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.

@@ -248,6 +254,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 		litRemain: len(s.literals),
 	}

+	if debugDecoder {
+		println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
+	}
+
 	s.seqSize = 0
 	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
 	var errCode int

@@ -278,6 +288,8 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 		case errorNotEnoughLiterals:
 			ll := ctx.seqs[i].ll
 			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+		case errorOverread:
+			return io.ErrUnexpectedEOF
 		}

 		return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)

@@ -292,6 +304,9 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 	if s.seqSize > maxBlockSize {
 		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
+	if debugDecoder {
+		println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
+	}
 	err := br.close()
 	if err != nil {
 		printf("Closing sequences: %v, %+v\n", err, *br)

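The hunks above thread a new assembly return code, errorOverread (6), through both Go dispatch switches as io.ErrUnexpectedEOF. A minimal sketch of that code-to-error mapping, with a hypothetical decodeAsm stand-in for the real assembly routine (only the constants and the errorOverread case come from the diff above):

package main

import (
	"errors"
	"fmt"
	"io"
)

// Error codes mirrored from the hunks above.
const (
	errorNotEnoughLiterals = 4
	errorNotEnoughSpace    = 5
	errorOverread          = 6 // bit reader consumed more bits than the stream holds
)

// decodeAsm is a hypothetical stand-in for the real amd64 routine; it only
// illustrates how a numeric return code is translated into a Go error.
func decodeAsm() int { return errorOverread }

func mapCode(code int) error {
	switch code {
	case 0:
		return nil
	case errorNotEnoughLiterals:
		return errors.New("unexpected literal count")
	case errorNotEnoughSpace:
		return errors.New("output bigger than max block size")
	case errorOverread:
		// New in this update: overreading the bit stream is surfaced as
		// io.ErrUnexpectedEOF instead of producing a generic error code.
		return io.ErrUnexpectedEOF
	}
	return fmt.Errorf("decoder returned unknown code %d", code)
}

func main() {
	fmt.Println(mapCode(decodeAsm())) // unexpected EOF
}
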
252 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s generated vendored

This generated file applies the same set of mechanical edits to every decoder entry point (sequenceDecs_decode_amd64, sequenceDecs_decode_56_amd64, sequenceDecs_decode_bmi2, sequenceDecs_decode_56_bmi2, sequenceDecs_decodeSync_amd64, sequenceDecs_decodeSync_bmi2, sequenceDecs_decodeSync_safe_amd64, sequenceDecs_decodeSync_safe_bmi2). Representative hunks from sequenceDecs_decode_amd64 are shown once per pattern; the bmi2 and decodeSync variants receive the identical changes with their own register allocation (br in BX, value in AX, bitsRead in DX) and label prefixes.

1. Prologue: the bitReader fields are loaded from their new offsets (input pointer at 0, remaining length at 8, value at 24, bitsRead at 32, replacing the old 0/24/32/40 layout):

@@ -5,11 +5,11 @@
 // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV
 TEXT ·sequenceDecs_decode_amd64(SB), $8-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 32(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    8(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX

2. Epilogue: the bitReader state is written back at the same new offsets at every exit point (*_match_len_ofs_ok in the decode functions, loop_finished in the decodeSync functions):

@@ -293,9 +301,9 @@ sequenceDecs_decode_amd64_match_len_ofs_ok:
 	MOVQ R12, 152(AX)
 	MOVQ R13, 160(AX)
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
-	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ DX, 24(AX)
+	MOVB BL, 32(AX)
+	MOVQ SI, 8(AX)

 	// Return success
 	MOVQ $0x00000000, ret+24(FP)

3. Overread detection: each *_fill_byte_by_byte and *_fill_2_byte_by_byte loop now jumps to a new *_fill_check_overread label when the input runs out, and that label bails out to error_overread once more than 64 bits ($0x40) have been consumed:

@@ -38,7 +38,7 @@ sequenceDecs_decode_amd64_main_loop:

 sequenceDecs_decode_amd64_fill_byte_by_byte:
 	CMPQ SI, $0x00
-	JLE  sequenceDecs_decode_amd64_fill_end
+	JLE  sequenceDecs_decode_amd64_fill_check_overread
 	CMPQ BX, $0x07
 	JLE  sequenceDecs_decode_amd64_fill_end
 	SHLQ $0x08, DX

@@ -49,6 +49,10 @@ sequenceDecs_decode_amd64_fill_byte_by_byte:
 	ORQ  AX, DX
 	JMP  sequenceDecs_decode_amd64_fill_byte_by_byte

+sequenceDecs_decode_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_amd64_fill_end:
 	// Update offset
 	MOVQ R9, AX

4. A matching error_overread return block is appended after error_not_enough_literals in each function, returning the new code 6 that seqdec_amd64.go maps to io.ErrUnexpectedEOF (excerpt from @@ -320,14 +328,19 @@):

 error_not_enough_literals:
 	MOVQ $0x00000004, ret+24(FP)
 	RET

+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET

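The offset changes above (value 32→24, bitsRead 40→32, remaining length 24→8) are consistent with the upstream bitReader dropping its separate off field and tracking the remaining input through len(br.in), which is also what the seqdec_generic.go hunk below switches to. A rough sketch of the assumed old and new struct layouts on 64-bit platforms; the field names and ordering are inferred from these offsets, not copied from the vendored source:

package main

import (
	"fmt"
	"unsafe"
)

// Assumed pre-update layout: in (ptr+len+cap = 24 bytes), off at offset 24,
// value at 32, bitsRead at 40.
type bitReaderOld struct {
	in       []byte
	off      uint   // offset 24
	value    uint64 // offset 32
	bitsRead uint8  // offset 40
}

// Assumed post-update layout: the off field is gone, so value lands at 24 and
// bitsRead at 32, while len(in) (offset 8) tracks the remaining input.
type bitReaderNew struct {
	in       []byte
	value    uint64 // offset 24
	bitsRead uint8  // offset 32
}

func main() {
	o, n := bitReaderOld{}, bitReaderNew{}
	fmt.Println(unsafe.Offsetof(o.value), unsafe.Offsetof(o.bitsRead)) // 32 40
	fmt.Println(unsafe.Offsetof(n.value), unsafe.Offsetof(n.bitsRead)) // 24 32
}
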
2 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go generated vendored

@@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 	}
 	for i := range seqs {
 		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

5 vendor/github.com/klauspost/compress/zstd/snappy.go generated vendored

@@ -95,10 +95,9 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
 	var written int64
 	var readHeader bool
 	{
-		var header []byte
-		var n int
-		header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+		header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])

+		var n int
 		n, r.err = w.Write(header)
 		if r.err != nil {
 			return written, r.err

26 vendor/github.com/klauspost/compress/zstd/zstd.go generated vendored

@@ -9,7 +9,6 @@ import (
 	"errors"
 	"log"
 	"math"
-	"math/bits"
 )

 // enable debug printing

@@ -106,33 +105,12 @@ func printf(format string, a ...interface{}) {
 	}
 }

-// matchLen returns the maximum common prefix length of a and b.
-// a must be the shortest of the two.
-func matchLen(a, b []byte) (n int) {
-	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
-		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
-		if diff != 0 {
-			return n + bits.TrailingZeros64(diff)>>3
-		}
-		n += 8
-	}
-
-	for i := range a {
-		if a[i] != b[i] {
-			break
-		}
-		n++
-	}
-	return n
-
-}
-
 func load3232(b []byte, i int32) uint32 {
-	return binary.LittleEndian.Uint32(b[i:])
+	return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
 }

 func load6432(b []byte, i int32) uint64 {
-	return binary.LittleEndian.Uint64(b[i:])
+	return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
 }

 type byter interface {

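The load3232/load6432 change above routes the read through a full (three-index) slice expression, which keeps the same bytes but clamps the slice's capacity to its length, presumably as a bounds-check/aliasing hint. A small standalone illustration of what b[:len(b):len(b)] does, independent of the zstd internals:

package main

import (
	"encoding/binary"
	"fmt"
)

func load6432(b []byte, i int32) uint64 {
	// b[:len(b):len(b)] keeps the same elements but clamps cap(b) to len(b).
	return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
}

func main() {
	b := make([]byte, 16, 64)
	fmt.Println(len(b), cap(b)) // 16 64

	s := b[:len(b):len(b)]
	fmt.Println(len(s), cap(s)) // 16 16

	binary.LittleEndian.PutUint64(b[4:], 0xdeadbeef)
	fmt.Printf("%#x\n", load6432(b, 4)) // 0xdeadbeef
}
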
41 vendor/github.com/labstack/echo/v4/CHANGELOG.md generated vendored

@@ -1,5 +1,46 @@
 # Changelog

+## v4.11.4 - 2023-12-20
+
+**Security**
+
+* Upgrade golang.org/x/crypto to v0.17.0 to fix vulnerability [issue](https://pkg.go.dev/vuln/GO-2023-2402) [#2562](https://github.com/labstack/echo/pull/2562)
+
+**Enhancements**
+
+* Update deps and mark Go version to 1.18 as this is what golang.org/x/* use [#2563](https://github.com/labstack/echo/pull/2563)
+* Request logger: add example for Slog https://pkg.go.dev/log/slog [#2543](https://github.com/labstack/echo/pull/2543)
+
+
+## v4.11.3 - 2023-11-07
+
+**Security**
+
+* 'c.Attachment' and 'c.Inline' should escape filename in 'Content-Disposition' header to avoid 'Reflect File Download' vulnerability. [#2541](https://github.com/labstack/echo/pull/2541)
+
+**Enhancements**
+
+* Tests: refactor context tests to be separate functions [#2540](https://github.com/labstack/echo/pull/2540)
+* Proxy middleware: reuse echo request context [#2537](https://github.com/labstack/echo/pull/2537)
+* Mark unmarshallable yaml struct tags as ignored [#2536](https://github.com/labstack/echo/pull/2536)
+
+
+## v4.11.2 - 2023-10-11
+
+**Security**
+
+* Bump golang.org/x/net to prevent CVE-2023-39325 / CVE-2023-44487 HTTP/2 Rapid Reset Attack [#2527](https://github.com/labstack/echo/pull/2527)
+* fix(sec): randomString bias introduced by #2490 [#2492](https://github.com/labstack/echo/pull/2492)
+* CSRF/RequestID mw: switch math/random usage to crypto/random [#2490](https://github.com/labstack/echo/pull/2490)
+
+**Enhancements**
+
+* Delete unused context in body_limit.go [#2483](https://github.com/labstack/echo/pull/2483)
+* Use Go 1.21 in CI [#2505](https://github.com/labstack/echo/pull/2505)
+* Fix some typos [#2511](https://github.com/labstack/echo/pull/2511)
+* Allow CORS middleware to send Access-Control-Max-Age: 0 [#2518](https://github.com/labstack/echo/pull/2518)
+* Bump dependancies [#2522](https://github.com/labstack/echo/pull/2522)
+
 ## v4.11.1 - 2023-07-16

 **Fixes**

2 vendor/github.com/labstack/echo/v4/README.md generated vendored

@@ -3,7 +3,7 @@
 [![Sourcegraph](https://sourcegraph.com/github.com/labstack/echo/-/badge.svg?style=flat-square)](https://sourcegraph.com/github.com/labstack/echo?badge)
 [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/labstack/echo/v4)
 [![Go Report Card](https://goreportcard.com/badge/github.com/labstack/echo?style=flat-square)](https://goreportcard.com/report/github.com/labstack/echo)
-[![Build Status](http://img.shields.io/travis/labstack/echo.svg?style=flat-square)](https://travis-ci.org/labstack/echo)
+[![GitHub Workflow Status (with event)](https://img.shields.io/github/actions/workflow/status/labstack/echo/echo.yml?style=flat-square)](https://github.com/labstack/echo/actions)
 [![Codecov](https://img.shields.io/codecov/c/github/labstack/echo.svg?style=flat-square)](https://codecov.io/gh/labstack/echo)
 [![Forum](https://img.shields.io/badge/community-forum-00afd1.svg?style=flat-square)](https://github.com/labstack/echo/discussions)
 [![Twitter](https://img.shields.io/badge/twitter-@labstack-55acee.svg?style=flat-square)](https://twitter.com/labstack)

2 vendor/github.com/labstack/echo/v4/binder.go generated vendored

@@ -1323,7 +1323,7 @@ func (b *ValueBinder) unixTime(sourceParam string, dest *time.Time, valueMustExi
 	case time.Second:
 		*dest = time.Unix(n, 0)
 	case time.Millisecond:
-		*dest = time.Unix(n/1e3, (n%1e3)*1e6) // TODO: time.UnixMilli(n) exists since Go1.17 switch to that when min version allows
+		*dest = time.UnixMilli(n)
 	case time.Nanosecond:
 		*dest = time.Unix(0, n)
 	}

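The binder change above assumes time.UnixMilli (available since Go 1.17) yields the same instant as the old manual second/nanosecond split; a quick standalone check of that equivalence:

package main

import (
	"fmt"
	"time"
)

func main() {
	n := int64(1_700_000_123_456) // milliseconds since the Unix epoch

	manual := time.Unix(n/1e3, (n%1e3)*1e6) // the old formulation
	builtin := time.UnixMilli(n)            // the replacement (Go 1.17+)

	fmt.Println(manual.Equal(builtin)) // true
}
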
4 vendor/github.com/labstack/echo/v4/context.go generated vendored

@@ -584,8 +584,10 @@ func (c *context) Inline(file, name string) error {
 	return c.contentDisposition(file, name, "inline")
 }

+var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
+
 func (c *context) contentDisposition(file, name, dispositionType string) error {
-	c.response.Header().Set(HeaderContentDisposition, fmt.Sprintf("%s; filename=%q", dispositionType, name))
+	c.response.Header().Set(HeaderContentDisposition, fmt.Sprintf(`%s; filename="%s"`, dispositionType, quoteEscaper.Replace(name)))
 	return c.File(file)
 }

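The escaping added above only backslash-escapes quotes and backslashes before the name is embedded in a quoted Content-Disposition filename parameter. A standalone sketch of the same replacement rule (not the vendored echo code itself):

package main

import (
	"fmt"
	"strings"
)

// Same replacement rules as the hunk above: escape backslashes and double quotes
// so a user-supplied name cannot break out of the quoted filename parameter.
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")

func main() {
	name := `re"port\2023.csv`
	header := fmt.Sprintf(`attachment; filename="%s"`, quoteEscaper.Replace(name))
	fmt.Println(header)
	// attachment; filename="re\"port\\2023.csv"
}
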
2 vendor/github.com/labstack/echo/v4/echo.go generated vendored

@@ -259,7 +259,7 @@ const (

 const (
 	// Version of Echo
-	Version = "4.11.1"
+	Version = "4.11.4"
 	website = "https://echo.labstack.com"
 	// http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo
 	banner = `

10 vendor/github.com/labstack/echo/v4/middleware/body_limit.go generated vendored

@@ -23,9 +23,8 @@ type (

 	limitedReader struct {
 		BodyLimitConfig
 		reader  io.ReadCloser
 		read    int64
-		context echo.Context
 	}
 )

@@ -80,7 +79,7 @@ func BodyLimitWithConfig(config BodyLimitConfig) echo.MiddlewareFunc {

 			// Based on content read
 			r := pool.Get().(*limitedReader)
-			r.Reset(req.Body, c)
+			r.Reset(req.Body)
 			defer pool.Put(r)
 			req.Body = r

@@ -102,9 +101,8 @@ func (r *limitedReader) Close() error {
 	return r.reader.Close()
 }

-func (r *limitedReader) Reset(reader io.ReadCloser, context echo.Context) {
+func (r *limitedReader) Reset(reader io.ReadCloser) {
 	r.reader = reader
-	r.context = context
 	r.read = 0
 }

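The Reset change above drops the unused echo.Context; the middleware keeps recycling limitedReader values through a sync.Pool. A stripped-down sketch of that pool-and-reset pattern with hypothetical types (it is not the echo implementation):

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// limitedReader is a simplified stand-in: it only tracks how many bytes were read.
type limitedReader struct {
	reader io.ReadCloser
	read   int64
}

func (r *limitedReader) Read(p []byte) (int, error) {
	n, err := r.reader.Read(p)
	r.read += int64(n)
	return n, err
}

// Reset mirrors the new, context-free signature from the hunk above.
func (r *limitedReader) Reset(reader io.ReadCloser) {
	r.reader = reader
	r.read = 0
}

var pool = sync.Pool{New: func() interface{} { return &limitedReader{} }}

func main() {
	body := io.NopCloser(strings.NewReader("hello"))

	r := pool.Get().(*limitedReader)
	r.Reset(body) // reuse a pooled reader for this request
	defer pool.Put(r)

	_, _ = io.ReadAll(r)
	fmt.Println(r.read) // 5
}
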
2 vendor/github.com/labstack/echo/v4/middleware/context_timeout.go generated vendored

@@ -13,7 +13,7 @@ type ContextTimeoutConfig struct {
 	// Skipper defines a function to skip middleware.
 	Skipper Skipper

-	// ErrorHandler is a function when error aries in middeware execution.
+	// ErrorHandler is a function when error aries in middleware execution.
 	ErrorHandler func(err error, c echo.Context) error

 	// Timeout configures a timeout for the middleware, defaults to 0 for no timeout

13 vendor/github.com/labstack/echo/v4/middleware/cors.go generated vendored

@@ -39,7 +39,7 @@ type (
 		// See https://blog.portswigger.net/2016/10/exploiting-cors-misconfigurations-for.html
 		//
 		// Optional.
-		AllowOriginFunc func(origin string) (bool, error) `yaml:"allow_origin_func"`
+		AllowOriginFunc func(origin string) (bool, error) `yaml:"-"`

 		// AllowMethods determines the value of the Access-Control-Allow-Methods
 		// response header. This header specified the list of methods allowed when

@@ -99,8 +99,9 @@ type (
 		// MaxAge determines the value of the Access-Control-Max-Age response header.
 		// This header indicates how long (in seconds) the results of a preflight
 		// request can be cached.
+		// The header is set only if MaxAge != 0, negative value sends "0" which instructs browsers not to cache that response.
 		//
-		// Optional. Default value 0. The header is set only if MaxAge > 0.
+		// Optional. Default value 0 - meaning header is not sent.
 		//
 		// See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age
 		MaxAge int `yaml:"max_age"`

@@ -159,7 +160,11 @@ func CORSWithConfig(config CORSConfig) echo.MiddlewareFunc {
 	allowMethods := strings.Join(config.AllowMethods, ",")
 	allowHeaders := strings.Join(config.AllowHeaders, ",")
 	exposeHeaders := strings.Join(config.ExposeHeaders, ",")
-	maxAge := strconv.Itoa(config.MaxAge)
+
+	maxAge := "0"
+	if config.MaxAge > 0 {
+		maxAge = strconv.Itoa(config.MaxAge)
+	}

 	return func(next echo.HandlerFunc) echo.HandlerFunc {
 		return func(c echo.Context) error {

@@ -282,7 +287,7 @@ func CORSWithConfig(config CORSConfig) echo.MiddlewareFunc {
 				res.Header().Set(echo.HeaderAccessControlAllowHeaders, h)
 			}
 		}
-		if config.MaxAge > 0 {
+		if config.MaxAge != 0 {
 			res.Header().Set(echo.HeaderAccessControlMaxAge, maxAge)
 		}
 		return c.NoContent(http.StatusNoContent)

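Taken together, the CORS hunks above mean a positive MaxAge still sends its own value, zero omits the header, and a negative value now sends Access-Control-Max-Age: 0 to disable preflight caching. A standalone sketch of just that header decision (not the echo middleware itself):

package main

import (
	"fmt"
	"strconv"
)

// maxAgeHeader mirrors the decision introduced above: the header is written
// whenever MaxAge != 0, and any negative value is normalized to "0".
func maxAgeHeader(maxAge int) (value string, send bool) {
	if maxAge == 0 {
		return "", false // header omitted entirely
	}
	if maxAge < 0 {
		return "0", true // explicitly disable preflight caching
	}
	return strconv.Itoa(maxAge), true
}

func main() {
	for _, v := range []int{600, 0, -1} {
		value, send := maxAgeHeader(v)
		fmt.Printf("MaxAge=%d -> send=%v value=%q\n", v, send, value)
	}
}
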
4 vendor/github.com/labstack/echo/v4/middleware/csrf.go generated vendored

@@ -6,7 +6,6 @@ import (
 	"time"

 	"github.com/labstack/echo/v4"
-	"github.com/labstack/gommon/random"
 )

 type (

@@ -103,6 +102,7 @@ func CSRFWithConfig(config CSRFConfig) echo.MiddlewareFunc {
 	if config.TokenLength == 0 {
 		config.TokenLength = DefaultCSRFConfig.TokenLength
 	}
+
 	if config.TokenLookup == "" {
 		config.TokenLookup = DefaultCSRFConfig.TokenLookup
 	}

@@ -132,7 +132,7 @@ func CSRFWithConfig(config CSRFConfig) echo.MiddlewareFunc {

 			token := ""
 			if k, err := c.Cookie(config.CookieName); err != nil {
-				token = random.String(config.TokenLength) // Generate token
+				token = randomString(config.TokenLength)
 			} else {
 				token = k.Value // Reuse token
 			}

4 vendor/github.com/labstack/echo/v4/middleware/proxy.go generated vendored

@@ -359,6 +359,10 @@ func ProxyWithConfig(config ProxyConfig) echo.MiddlewareFunc {
 				c.Set("_error", nil)
 			}

+			// This is needed for ProxyConfig.ModifyResponse and/or ProxyConfig.Transport to be able to process the Request
+			// that Balancer may have replaced with c.SetRequest.
+			req = c.Request()
+
 			// Proxy
 			switch {
 			case c.IsWebSocket():

5 vendor/github.com/labstack/echo/v4/middleware/request_id.go generated vendored

@@ -2,7 +2,6 @@ package middleware

 import (
 	"github.com/labstack/echo/v4"
-	"github.com/labstack/gommon/random"
 )

 type (

@@ -12,7 +11,7 @@ type (
 		Skipper Skipper

 		// Generator defines a function to generate an ID.
-		// Optional. Default value random.String(32).
+		// Optional. Defaults to generator for random string of length 32.
 		Generator func() string

 		// RequestIDHandler defines a function which is executed for a request id.

@@ -73,5 +72,5 @@ func RequestIDWithConfig(config RequestIDConfig) echo.MiddlewareFunc {
 }

 func generator() string {
-	return random.String(32)
+	return randomString(32)
 }

24 vendor/github.com/labstack/echo/v4/middleware/request_logger.go generated vendored

@@ -8,6 +8,30 @@ import (
 	"github.com/labstack/echo/v4"
 )

+// Example for `slog` https://pkg.go.dev/log/slog
+// logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
+// e.Use(middleware.RequestLoggerWithConfig(middleware.RequestLoggerConfig{
+//	LogStatus:   true,
+//	LogURI:      true,
+//	LogError:    true,
+//	HandleError: true, // forwards error to the global error handler, so it can decide appropriate status code
+//	LogValuesFunc: func(c echo.Context, v middleware.RequestLoggerValues) error {
+//		if v.Error == nil {
+//			logger.LogAttrs(context.Background(), slog.LevelInfo, "REQUEST",
+//				slog.String("uri", v.URI),
+//				slog.Int("status", v.Status),
+//			)
+//		} else {
+//			logger.LogAttrs(context.Background(), slog.LevelError, "REQUEST_ERROR",
+//				slog.String("uri", v.URI),
+//				slog.Int("status", v.Status),
+//				slog.String("err", v.Error.Error()),
+//			)
+//		}
+//		return nil
+//	},
+// }))
+//
 // Example for `fmt.Printf`
 // e.Use(middleware.RequestLoggerWithConfig(middleware.RequestLoggerConfig{
 //	LogStatus: true,

vendor/github.com/labstack/echo/v4/middleware/rewrite.go
generated
vendored
2
vendor/github.com/labstack/echo/v4/middleware/rewrite.go
generated
vendored
@ -27,7 +27,7 @@ type (
|
|||||||
// Example:
|
// Example:
|
||||||
// "^/old/[0.9]+/": "/new",
|
// "^/old/[0.9]+/": "/new",
|
||||||
// "^/api/.+?/(.*)": "/v2/$1",
|
// "^/api/.+?/(.*)": "/v2/$1",
|
||||||
RegexRules map[*regexp.Regexp]string `yaml:"regex_rules"`
|
RegexRules map[*regexp.Regexp]string `yaml:"-"`
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
46 vendor/github.com/labstack/echo/v4/middleware/util.go generated vendored

@@ -1,7 +1,11 @@
 package middleware

 import (
+	"bufio"
+	"crypto/rand"
+	"io"
 	"strings"
+	"sync"
 )

 func matchScheme(domain, pattern string) bool {

@@ -52,3 +56,45 @@ func matchSubdomain(domain, pattern string) bool {
 	}
 	return false
 }
+
+// https://tip.golang.org/doc/go1.19#:~:text=Read%20no%20longer%20buffers%20random%20data%20obtained%20from%20the%20operating%20system%20between%20calls
+var randomReaderPool = sync.Pool{New: func() interface{} {
+	return bufio.NewReader(rand.Reader)
+}}
+
+const randomStringCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+const randomStringCharsetLen = 52 // len(randomStringCharset)
+const randomStringMaxByte = 255 - (256 % randomStringCharsetLen)
+
+func randomString(length uint8) string {
+	reader := randomReaderPool.Get().(*bufio.Reader)
+	defer randomReaderPool.Put(reader)
+
+	b := make([]byte, length)
+	r := make([]byte, length+(length/4)) // perf: avoid read from rand.Reader many times
+	var i uint8 = 0
+
+	// security note:
+	// we can't just simply do b[i]=randomStringCharset[rb%len(randomStringCharset)],
+	// len(len(randomStringCharset)) is 52, and rb is [0, 255], 256 = 52 * 4 + 48.
+	// make the first 48 characters more possibly to be generated then others.
+	// So we have to skip bytes when rb > randomStringMaxByte
+
+	for {
+		_, err := io.ReadFull(reader, r)
+		if err != nil {
+			panic("unexpected error happened when reading from bufio.NewReader(crypto/rand.Reader)")
+		}
+		for _, rb := range r {
+			if rb > randomStringMaxByte {
+				// Skip this number to avoid bias.
+				continue
+			}
+			b[i] = randomStringCharset[rb%randomStringCharsetLen]
+			i++
+			if i == length {
+				return string(b)
+			}
+		}
+	}
+}

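The new randomString above avoids modulo bias by rejection sampling: random bytes above randomStringMaxByte (207) are discarded so each of the 52 letters stays equally likely after the remainder operation. A self-contained re-derivation of the same rejection rule (not the vendored function):

package main

import (
	"crypto/rand"
	"fmt"
	"io"
)

const charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
const maxByte = 255 - (256 % len(charset)) // 207: the largest byte we accept

// randomString draws random bytes and keeps only those <= maxByte, so that
// taking the remainder by len(charset) cannot favour the first characters.
func randomString(length int) string {
	out := make([]byte, 0, length)
	buf := make([]byte, length)
	for len(out) < length {
		if _, err := io.ReadFull(rand.Reader, buf); err != nil {
			panic(err)
		}
		for _, b := range buf {
			if b > maxByte {
				continue // rejected: would bias the distribution
			}
			out = append(out, charset[int(b)%len(charset)])
			if len(out) == length {
				break
			}
		}
	}
	return string(out)
}

func main() {
	fmt.Println(randomString(32))
}
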
48 vendor/github.com/labstack/gommon/random/random.go generated vendored

@@ -1,48 +0,0 @@
-package random
-
-import (
-	"math/rand"
-	"strings"
-	"time"
-)
-
-type (
-	Random struct {
-	}
-)
-
-// Charsets
-const (
-	Uppercase    = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-	Lowercase    = "abcdefghijklmnopqrstuvwxyz"
-	Alphabetic   = Uppercase + Lowercase
-	Numeric      = "0123456789"
-	Alphanumeric = Alphabetic + Numeric
-	Symbols      = "`" + `~!@#$%^&*()-_+={}[]|\;:"<>,./?`
-	Hex          = Numeric + "abcdef"
-)
-
-var (
-	global = New()
-)
-
-func New() *Random {
-	rand.Seed(time.Now().UnixNano())
-	return new(Random)
-}
-
-func (r *Random) String(length uint8, charsets ...string) string {
-	charset := strings.Join(charsets, "")
-	if charset == "" {
-		charset = Alphanumeric
-	}
-	b := make([]byte, length)
-	for i := range b {
-		b[i] = charset[rand.Int63()%int64(len(charset))]
-	}
-	return string(b)
-}
-
-func String(length uint8, charsets ...string) string {
-	return global.String(length, charsets...)
-}
Some files were not shown because too many files have changed in this diff.