// Copyright (C) 2017 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package connections

import (
	"context"
	"fmt"
	"io"
	"sync/atomic"

	"golang.org/x/time/rate"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/sync"
)

// limiter manages a read and write rate limit, reacting to config changes
// as appropriate.
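//
// There is one overall limiter each for reads and writes, plus one pair
// per configured remote device; a transfer must pass both its device
// limiter and the overall one.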
type limiter struct {
	myID                protocol.DeviceID
	mu                  sync.Mutex
	write               *rate.Limiter
	read                *rate.Limiter
	limitsLAN           atomic.Bool
	deviceReadLimiters  map[protocol.DeviceID]*rate.Limiter
	deviceWriteLimiters map[protocol.DeviceID]*rate.Limiter
}

type waiter interface {
	// This is the rate limiting operation
	WaitN(ctx context.Context, n int) error
	Limit() rate.Limit
}

const (
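	// Allow bursts of up to 512 KiB (4 * 128 KiB). No single call to WaitN
	// may request more than this; larger amounts are split up in take below.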
	limiterBurstSize = 4 * 128 << 10
)

func newLimiter(myId protocol.DeviceID, cfg config.Wrapper) *limiter {
	l := &limiter{
		myID:                myId,
		write:               rate.NewLimiter(rate.Inf, limiterBurstSize),
		read:                rate.NewLimiter(rate.Inf, limiterBurstSize),
		mu:                  sync.NewMutex(),
		deviceReadLimiters:  make(map[protocol.DeviceID]*rate.Limiter),
		deviceWriteLimiters: make(map[protocol.DeviceID]*rate.Limiter),
	}

cfg.Subscribe(l)
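
	// Start from a sentinel config with -1 limits so that the initial
	// CommitConfiguration call below always sees a change and applies the
	// actual configured limits.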
	prev := config.Configuration{Options: config.OptionsConfiguration{MaxRecvKbps: -1, MaxSendKbps: -1}}
	l.CommitConfiguration(prev, cfg.RawCopy())

	return l
}

// setLimitsLocked sets the device's read and write limiters according to
// the given DeviceConfiguration, returning true if either limit changed.
func (lim *limiter) setLimitsLocked(device config.DeviceConfiguration) bool {
	readLimiter := lim.getReadLimiterLocked(device.DeviceID)
	writeLimiter := lim.getWriteLimiterLocked(device.DeviceID)

	// The limiters for this device now exist, so we can record the previous
	// rates for logging.
	previousReadLimit := readLimiter.Limit()
previousWriteLimit := writeLimiter.Limit()
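
	// The rates are in KiB/s in the config; multiply by 1024 to get bytes/s.
	// Zero or a negative value means unlimited.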
	currentReadLimit := rate.Limit(device.MaxRecvKbps) * 1024
	currentWriteLimit := rate.Limit(device.MaxSendKbps) * 1024
	if device.MaxSendKbps <= 0 {
		currentWriteLimit = rate.Inf
	}
	if device.MaxRecvKbps <= 0 {
		currentReadLimit = rate.Inf
	}

	// Nothing about this device has changed; move on to the next device.
	if previousWriteLimit == currentWriteLimit && previousReadLimit == currentReadLimit {
		return false
	}

	readLimiter.SetLimit(currentReadLimit)
	writeLimiter.SetLimit(currentWriteLimit)

	return true
}

// processDevicesConfigurationLocked handles adding, updating and removing
// device limiters.
func (lim *limiter) processDevicesConfigurationLocked(from, to config.Configuration) {
	seen := make(map[protocol.DeviceID]struct{})

	// Mark devices which should not be removed, create new limiters if
	// needed and assign the new limiter rates.
	for _, dev := range to.Devices {
		if dev.DeviceID == lim.myID {
			// This limiter was created for the local device; skip it here.
			continue
		}

		seen[dev.DeviceID] = struct{}{}

		if lim.setLimitsLocked(dev) {
			readLimitStr := "is unlimited"
			if dev.MaxRecvKbps > 0 {
				readLimitStr = fmt.Sprintf("limit is %d KiB/s", dev.MaxRecvKbps)
			}
			writeLimitStr := "is unlimited"
			if dev.MaxSendKbps > 0 {
				writeLimitStr = fmt.Sprintf("limit is %d KiB/s", dev.MaxSendKbps)
			}

			l.Infof("Device %s send rate %s, receive rate %s", dev.DeviceID, writeLimitStr, readLimitStr)
		}
	}

	// Delete limiters for remote devices that were removed in the new
	// configuration.
	for _, dev := range from.Devices {
		if _, ok := seen[dev.DeviceID]; !ok {
			l.Debugf("deviceID: %s should be removed", dev.DeviceID)

			delete(lim.deviceWriteLimiters, dev.DeviceID)
			delete(lim.deviceReadLimiters, dev.DeviceID)
		}
	}
}

func (lim *limiter) CommitConfiguration(from, to config.Configuration) bool {
	// Lock to ensure the configuration is updated atomically.
	lim.mu.Lock()
	defer lim.mu.Unlock()

	// Delete, add or update limiters for devices.
lim.processDevicesConfigurationLocked(from, to)
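
	// If the overall limits are unchanged there is nothing more to do.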
	if from.Options.MaxRecvKbps == to.Options.MaxRecvKbps &&
		from.Options.MaxSendKbps == to.Options.MaxSendKbps &&
		from.Options.LimitBandwidthInLan == to.Options.LimitBandwidthInLan {
		return true
	}

	limited := false
	sendLimitStr := "is unlimited"
	recvLimitStr := "is unlimited"

	// The rate variables are in KiB/s in the config (despite the camel casing
	// of the name). We multiply by 1024 to get bytes/s.
	if to.Options.MaxRecvKbps <= 0 {
		lim.read.SetLimit(rate.Inf)
	} else {
		lim.read.SetLimit(1024 * rate.Limit(to.Options.MaxRecvKbps))
		recvLimitStr = fmt.Sprintf("limit is %d KiB/s", to.Options.MaxRecvKbps)
		limited = true
	}

	if to.Options.MaxSendKbps <= 0 {
		lim.write.SetLimit(rate.Inf)
	} else {
		lim.write.SetLimit(1024 * rate.Limit(to.Options.MaxSendKbps))
		sendLimitStr = fmt.Sprintf("limit is %d KiB/s", to.Options.MaxSendKbps)
		limited = true
	}

	lim.limitsLAN.Store(to.Options.LimitBandwidthInLan)

	l.Infof("Overall send rate %s, receive rate %s", sendLimitStr, recvLimitStr)

	if limited {
		if to.Options.LimitBandwidthInLan {
			l.Infoln("Rate limits apply to LAN connections")
		} else {
			l.Infoln("Rate limits do not apply to LAN connections")
		}
	}

	return true
}

func (*limiter) String() string {
	// required by config.Committer interface
	return "connections.limiter"
}
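
// getLimiters returns a rate-limited reader and writer wrapping rw; each is
// limited by both the per-device limiter for remoteID and the overall
// limiter.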
func (lim *limiter) getLimiters(remoteID protocol.DeviceID, rw io.ReadWriter, isLAN bool) (io.Reader, io.Writer) {
	lim.mu.Lock()
	wr := lim.newLimitedWriterLocked(remoteID, rw, isLAN)
	rd := lim.newLimitedReaderLocked(remoteID, rw, isLAN)
	lim.mu.Unlock()
	return rd, wr
}
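
// newLimitedReaderLocked wraps r in a reader whose waiter combines the
// per-device read limiter and the overall read limiter.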
func (lim *limiter) newLimitedReaderLocked(remoteID protocol.DeviceID, r io.Reader, isLAN bool) io.Reader {
	return &limitedReader{
		reader: r,
		waiterHolder: waiterHolder{
			waiter:    totalWaiter{lim.getReadLimiterLocked(remoteID), lim.read},
			limitsLAN: &lim.limitsLAN,
			isLAN:     isLAN,
		},
	}
}
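
// newLimitedWriterLocked wraps w in a writer whose waiter combines the
// per-device write limiter and the overall write limiter.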
func (lim *limiter) newLimitedWriterLocked(remoteID protocol.DeviceID, w io.Writer, isLAN bool) io.Writer {
	return &limitedWriter{
		writer: w,
		waiterHolder: waiterHolder{
			waiter:    totalWaiter{lim.getWriteLimiterLocked(remoteID), lim.write},
			limitsLAN: &lim.limitsLAN,
			isLAN:     isLAN,
		},
	}
}

func (lim *limiter) getReadLimiterLocked(deviceID protocol.DeviceID) *rate.Limiter {
	return getRateLimiter(lim.deviceReadLimiters, deviceID)
}

func (lim *limiter) getWriteLimiterLocked(deviceID protocol.DeviceID) *rate.Limiter {
	return getRateLimiter(lim.deviceWriteLimiters, deviceID)
}
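
// getRateLimiter returns the limiter for the given device from the map,
// creating a new, initially unlimited one if it doesn't exist yet.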
func getRateLimiter(m map[protocol.DeviceID]*rate.Limiter, deviceID protocol.DeviceID) *rate.Limiter {
	limiter, ok := m[deviceID]
	if !ok {
		limiter = rate.NewLimiter(rate.Inf, limiterBurstSize)
		m[deviceID] = limiter
	}
	return limiter
}

// limitedReader is a rate limited io.Reader
type limitedReader struct {
	reader io.Reader
	waiterHolder
}

func (r *limitedReader) Read(buf []byte) (int, error) {
	n, err := r.reader.Read(buf)
	if !r.unlimited() {
		r.take(n)
	}
	return n, err
}

// limitedWriter is a rate limited io.Writer
type limitedWriter struct {
	writer io.Writer
	waiterHolder
}

func (w *limitedWriter) Write(buf []byte) (int, error) {
	if w.unlimited() {
		return w.writer.Write(buf)
	}

	// This does (potentially) multiple smaller writes in order to be less
	// bursty with large writes and slow rates. At the same time we don't
	// want to do hilarious amounts of tiny writes when the rate is high, so
	// try to be a bit adaptable. We range from the minimum write size of 1
	// KiB up to the limiter burst size, aiming for about a write every
// 10ms.
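	// For example, at a 1 MiB/s limit this aims at 1048576/100 = 10485
	// bytes, rounded up to the next kibibyte boundary: 11 KiB per write.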
	singleWriteSize := int(w.waiter.Limit() / 100)          // 10ms worth of data
	singleWriteSize = ((singleWriteSize / 1024) + 1) * 1024 // round up to the next kibibyte
	if singleWriteSize > limiterBurstSize {
		singleWriteSize = limiterBurstSize
	}

	written := 0
	for written < len(buf) {
		toWrite := singleWriteSize
		if toWrite > len(buf)-written {
			toWrite = len(buf) - written
		}
		w.take(toWrite)
		n, err := w.writer.Write(buf[written : written+toWrite])
		written += n
		if err != nil {
			return written, err
		}
	}

	return written, nil
}

// waiterHolder is the common functionality around having and evaluating a
// waiter, valid for both writers and readers
type waiterHolder struct {
	waiter    waiter
	limitsLAN *atomic.Bool
	isLAN     bool
}

// unlimited returns true if the waiter is not limiting the rate
func (w waiterHolder) unlimited() bool {
	if w.isLAN && !w.limitsLAN.Load() {
		return true
	}
	return w.waiter.Limit() == rate.Inf
}

// take consumes tokens from the waiter, splitting the request as needed:
// no single call to WaitN may be larger than the limiter burst size or it
// will hang.
func (w waiterHolder) take(tokens int) {
	// For writes we already split the buffer into smaller operations so those
	// will always end up in the fast path below. For reads, however, we don't
	// control the size of the incoming buffer and don't split the calls
	// into the lower level reads so we might get a large amount of data and
	// end up in the loop further down.

	if tokens <= limiterBurstSize {
		// Fast path. We won't get an error from WaitN as we don't pass a
		// context with a deadline.
		_ = w.waiter.WaitN(context.TODO(), tokens)
		return
	}

	for tokens > 0 {
		// Consume limiterBurstSize tokens at a time until we're done.
		if tokens > limiterBurstSize {
			_ = w.waiter.WaitN(context.TODO(), limiterBurstSize)
			tokens -= limiterBurstSize
		} else {
			_ = w.waiter.WaitN(context.TODO(), tokens)
			tokens = 0
		}
	}
}

// totalWaiter waits for all of the waiters
type totalWaiter []waiter

func (tw totalWaiter) WaitN(ctx context.Context, n int) error {
	for _, w := range tw {
		if err := w.WaitN(ctx, n); err != nil {
			// error here is context cancellation, most likely, so we abort
			// early
			return err
		}
	}
	return nil
}
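
// Limit returns the lowest limit of all the waiters; the most restrictive
// limiter governs the effective rate.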
func (tw totalWaiter) Limit() rate.Limit {
	min := rate.Inf
	for _, w := range tw {
		if l := w.Limit(); l < min {
			min = l
		}
	}
	return min
}