fix golangci-lint issues

Signed-off-by: CrazyMax <1951866+crazy-max@users.noreply.github.com>
CrazyMax 2024-11-07 13:54:05 +01:00
parent 26bbddb5d6
commit e04da86aca
GPG Key ID: ADE44D8C9D44FBE4
8 changed files with 10 additions and 9 deletions

View File

@@ -76,6 +76,7 @@ linters-settings:
     excludes:
       - G204 # Audit use of command execution
       - G402 # TLS MinVersion too low
+      - G115 # integer overflow conversion (TODO: verify these)
     config:
       G306: "0644"
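
Note: G115 is gosec's check for integer conversions that can overflow. A minimal, self-contained illustration of the class of code it flags (not taken from this repo):

package main

import "fmt"

func main() {
	var n int64 = 1 << 33
	// G115: int64 -> int32 can silently truncate; gosec reports the
	// conversion unless the rule is excluded (as above) or the value
	// is range-checked first.
	fmt.Println(int32(n)) // prints 0: the high bits are dropped
}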

View File

@@ -523,7 +523,7 @@ func Create(ctx context.Context, txn *store.Txn, dockerCli command.Cli, opts Cre
 	}
 	cancelCtx, cancel := context.WithCancelCause(ctx)
-	timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
 	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	nodes, err := b.LoadNodes(timeoutCtx, WithData())
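
Note: govet's lostcancel check fires on these lines because the CancelFunc returned by context.WithTimeoutCause is discarded. The //nolint comment is justified by the surrounding pattern: the timeout context is a child of one created with context.WithCancelCause, so the deferred parent cancel also releases the child. A standalone sketch of that pattern, with illustrative names and durations:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	cancelCtx, cancel := context.WithCancelCause(context.Background())
	// Discarding the second return value normally trips lostcancel; it is
	// safe here because cancelling the parent below also stops this child.
	timeoutCtx, _ := context.WithTimeoutCause(cancelCtx, 50*time.Millisecond, context.DeadlineExceeded) //nolint:govet,lostcancel
	defer cancel(context.Canceled)

	<-timeoutCtx.Done()
	// The third argument to WithTimeoutCause is what context.Cause reports
	// once the deadline fires; the real code wraps it with a stack trace.
	fmt.Println(context.Cause(timeoutCtx))
}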

View File

@@ -36,7 +36,7 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
 	}
 	timeoutCtx, cancel := context.WithCancelCause(ctx)
-	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
 	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	nodes, err := b.LoadNodes(timeoutCtx, builder.WithData())

View File

@@ -59,7 +59,7 @@ func runLs(ctx context.Context, dockerCli command.Cli, in lsOptions) error {
 	}
 	timeoutCtx, cancel := context.WithCancelCause(ctx)
-	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
 	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	eg, _ := errgroup.WithContext(timeoutCtx)

View File

@@ -151,7 +151,7 @@ func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, i
 	}
 	timeoutCtx, cancel := context.WithCancelCause(ctx)
-	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
 	defer func() { cancel(errors.WithStack(context.Canceled)) }()
 	eg, _ := errgroup.WithContext(timeoutCtx)

View File

@@ -63,7 +63,7 @@ func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts
 	// connect to buildx server if it is already running
 	ctx2, cancel := context.WithCancelCause(ctx)
-	ctx2, _ = context.WithTimeoutCause(ctx2, 1*time.Second, errors.WithStack(context.DeadlineExceeded))
+	ctx2, _ = context.WithTimeoutCause(ctx2, 1*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
 	c, err := newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
 	cancel(errors.WithStack(context.Canceled))
 	if err != nil {
@@ -92,7 +92,7 @@ func NewRemoteBuildxController(ctx context.Context, dockerCli command.Cli, opts
 	// wait for buildx server to be ready
 	ctx2, cancel = context.WithCancelCause(ctx)
-	ctx2, _ = context.WithTimeoutCause(ctx2, 10*time.Second, errors.WithStack(context.DeadlineExceeded))
+	ctx2, _ = context.WithTimeoutCause(ctx2, 10*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
 	c, err = newBuildxClientAndCheck(ctx2, filepath.Join(serverRoot, defaultSocketFilename))
 	cancel(errors.WithStack(context.Canceled))
 	if err != nil {

View File

@@ -29,7 +29,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
 func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
 	_, err := d.DockerAPI.ServerVersion(ctx)
 	if err != nil {
-		return nil, errors.Wrapf(driver.ErrNotConnecting{}, err.Error())
+		return nil, errors.Wrap(driver.ErrNotConnecting{}, err.Error())
 	}
 	return &driver.Info{
 		Status: driver.Running,
@@ -39,7 +39,7 @@ func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
 func (d *Driver) Version(ctx context.Context) (string, error) {
 	v, err := d.DockerAPI.ServerVersion(ctx)
 	if err != nil {
-		return "", errors.Wrapf(driver.ErrNotConnecting{}, err.Error())
+		return "", errors.Wrap(driver.ErrNotConnecting{}, err.Error())
 	}
 	if bkversion, _ := resolveBuildKitVersion(v.Version); bkversion != "" {
 		return bkversion, nil
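
Note: the Wrapf-to-Wrap switch avoids handing err.Error(), a non-constant string, to a printf-style function, which the printf vet check flags and which would mangle any message containing % signs. A sketch against github.com/pkg/errors, using an illustrative sentinel in place of driver.ErrNotConnecting{}:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotConnecting = errors.New("failed to connect to docker daemon")

func main() {
	cause := fmt.Errorf("invalid host header: 100%% rejected")
	// Wrapf(errNotConnecting, cause.Error()) would treat "100%" in the
	// message as a formatting directive; Wrap uses it verbatim.
	err := errors.Wrap(errNotConnecting, cause.Error())
	fmt.Println(err)
}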

View File

@@ -48,7 +48,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
 	}
 	return progress.Wrap("[internal] waiting for connection", l, func(_ progress.SubLogger) error {
 		cancelCtx, cancel := context.WithCancelCause(ctx)
-		ctx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded))
+		ctx, _ := context.WithTimeoutCause(cancelCtx, 20*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet,lostcancel // no need to manually cancel this context as we already rely on parent
 		defer func() { cancel(errors.WithStack(context.Canceled)) }()
 		return c.Wait(ctx)
 	})