fix(deps): update module github.com/docker/docker to v27.5.0+incompatible (#15738)

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Paul Rogers <paul.rogers@grafana.com>
pull/15777/head
renovate[bot] 12 months ago committed by GitHub
parent 1d269b5f52
commit e58665dedf
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 2
      clients/cmd/docker-driver/driver.go
  2. 3
      go.mod
  3. 6
      go.sum
  4. 191
      vendor/github.com/containerd/containerd/LICENSE
  5. 16
      vendor/github.com/containerd/containerd/NOTICE
  6. 94
      vendor/github.com/containerd/containerd/tracing/helpers.go
  7. 66
      vendor/github.com/containerd/containerd/tracing/log.go
  8. 129
      vendor/github.com/containerd/containerd/tracing/tracing.go
  9. 32
      vendor/github.com/docker/docker/api/swagger.yaml
  10. 2
      vendor/github.com/docker/docker/api/types/container/hostconfig.go
  11. 2
      vendor/github.com/docker/docker/api/types/types.go
  12. 10
      vendor/github.com/docker/docker/client/client.go
  13. 4
      vendor/github.com/docker/docker/client/ping.go
  14. 8
      vendor/github.com/docker/docker/daemon/logger/adapter.go
  15. 2
      vendor/github.com/docker/docker/daemon/logger/factory.go
  16. 2
      vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go
  17. 8
      vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go
  18. 3
      vendor/github.com/docker/docker/daemon/logger/logger.go
  19. 18
      vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go
  20. 564
      vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go
  21. 2
      vendor/github.com/docker/docker/daemon/logger/loggerutils/sharedtemp.go
  22. 10
      vendor/github.com/docker/docker/daemon/logger/ring.go
  23. 6
      vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
  24. 8
      vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
  25. 15
      vendor/github.com/docker/docker/pkg/ioutils/writers.go
  26. 2
      vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
  27. 8
      vendor/github.com/docker/docker/pkg/tailfile/tailfile.go
  28. 5
      vendor/modules.txt

@ -195,7 +195,7 @@ func (d *driver) ReadLogs(info logger.Info, config logger.ReadConfig) (io.ReadCl
}
go func() {
watcher := lr.ReadLogs(config)
watcher := lr.ReadLogs(context.Background(), config)
enc := protoio.NewUint32DelimitedWriter(w, binary.BigEndian)
defer enc.Close()

@ -29,7 +29,7 @@ require (
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/cristalhq/hedgedhttp v0.9.1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/docker/docker v27.3.1+incompatible
github.com/docker/docker v27.5.0+incompatible
github.com/docker/go-plugins-helpers v0.0.0-20240701071450-45e2431495c8
github.com/drone/envsubst v1.0.3
github.com/dustin/go-humanize v1.0.1
@ -164,6 +164,7 @@ require (
github.com/andybalholm/brotli v1.1.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect
github.com/containerd/containerd v1.7.25 // indirect
github.com/dlclark/regexp2 v1.11.4 // indirect
github.com/ebitengine/purego v0.8.1 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect

@ -284,6 +284,8 @@ github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 h1:boJj011Hh+874zpIySe
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/containerd v1.7.25 h1:khEQOAXOEJalRO228yzVsuASLH42vT7DIo9Ss+9SMFQ=
github.com/containerd/containerd v1.7.25/go.mod h1:tWfHzVI0azhw4CT2vaIjsb2CoV4LJ9PrMPaULAr21Ok=
github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
@ -326,8 +328,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo=
github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v27.5.0+incompatible h1:um++2NcQtGRTz5eEgO6aJimo6/JxrTXC941hd05JO6U=
github.com/docker/docker v27.5.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright The containerd Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -0,0 +1,16 @@
Docker
Copyright 2012-2015 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.

@ -0,0 +1,94 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tracing
import (
"encoding/json"
"fmt"
"strings"
"go.opentelemetry.io/otel/attribute"
)
// spanDelimiter separates the components of a dot-joined span name.
const spanDelimiter = "."

// makeSpanName joins the given name components into a single
// dot-separated span name (e.g. "pkg", "op" -> "pkg.op").
func makeSpanName(names ...string) string {
	return strings.Join(names, spanDelimiter)
}
// any converts an arbitrary value v into an attribute.KeyValue with key k.
// Scalars and slices of bool/int/float64/string map to the corresponding
// strongly-typed attribute constructors; for anything else it falls back to
// fmt.Stringer, then JSON encoding, and finally fmt.Sprintf("%v", ...).
//
// NOTE(review): this function shadows the Go 1.18 builtin alias `any`
// within this package; it predates the builtin.
func any(k string, v interface{}) attribute.KeyValue {
	// nil carries no type information; record a sentinel string instead.
	if v == nil {
		return attribute.String(k, "<nil>")
	}

	switch typed := v.(type) {
	case bool:
		return attribute.Bool(k, typed)
	case []bool:
		return attribute.BoolSlice(k, typed)
	case int:
		return attribute.Int(k, typed)
	case []int:
		return attribute.IntSlice(k, typed)
	case int8:
		// int8/int16 widen to int; int32/int64 use the Int64 constructors.
		return attribute.Int(k, int(typed))
	case []int8:
		ls := make([]int, 0, len(typed))
		for _, i := range typed {
			ls = append(ls, int(i))
		}
		return attribute.IntSlice(k, ls)
	case int16:
		return attribute.Int(k, int(typed))
	case []int16:
		ls := make([]int, 0, len(typed))
		for _, i := range typed {
			ls = append(ls, int(i))
		}
		return attribute.IntSlice(k, ls)
	case int32:
		return attribute.Int64(k, int64(typed))
	case []int32:
		ls := make([]int64, 0, len(typed))
		for _, i := range typed {
			ls = append(ls, int64(i))
		}
		return attribute.Int64Slice(k, ls)
	case int64:
		return attribute.Int64(k, typed)
	case []int64:
		return attribute.Int64Slice(k, typed)
	case float64:
		return attribute.Float64(k, typed)
	case []float64:
		return attribute.Float64Slice(k, typed)
	case string:
		return attribute.String(k, typed)
	case []string:
		return attribute.StringSlice(k, typed)
	}

	// Fallbacks for untyped values, in order of preference.
	if stringer, ok := v.(fmt.Stringer); ok {
		return attribute.String(k, stringer.String())
	}
	if b, err := json.Marshal(v); b != nil && err == nil {
		return attribute.String(k, string(b))
	}
	return attribute.String(k, fmt.Sprintf("%v", v))
}

@ -0,0 +1,66 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tracing
import (
"github.com/sirupsen/logrus"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// LogrusHook is a logrus hook which adds logrus events to active spans.
// If the span is not recording or the span context is invalid, the hook is a no-op.
type LogrusHook struct{}

// NewLogrusHook creates a new logrus hook.
func NewLogrusHook() *LogrusHook {
	h := LogrusHook{}
	return &h
}
// Levels returns the logrus levels that this hook is interested in.
// It subscribes to all levels; filtering of non-recording spans happens in Fire.
func (h *LogrusHook) Levels() []logrus.Level {
	return logrus.AllLevels
}
// Fire is called when a log event occurs.
// It records the log entry (message, fields, level, and timestamp) as an
// event on the span carried in the entry's context. Entries whose context
// has no valid, recording span are silently ignored; Fire never fails.
func (h *LogrusHook) Fire(entry *logrus.Entry) error {
	span := trace.SpanFromContext(entry.Context)
	if span == nil {
		return nil
	}

	// Skip spans that are invalid or not sampled/recording.
	if !span.SpanContext().IsValid() || !span.IsRecording() {
		return nil
	}

	span.AddEvent(
		entry.Message,
		trace.WithAttributes(logrusDataToAttrs(entry.Data)...),
		trace.WithAttributes(attribute.String("level", entry.Level.String())),
		trace.WithTimestamp(entry.Time),
	)

	return nil
}
// logrusDataToAttrs converts a set of logrus fields into span attributes,
// one attribute per field, typed via the any() helper.
func logrusDataToAttrs(data logrus.Fields) []attribute.KeyValue {
	out := make([]attribute.KeyValue, 0, len(data))
	for key, value := range data {
		out = append(out, any(key, value))
	}
	return out
}

@ -0,0 +1,129 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tracing
import (
"context"
"net/http"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
"go.opentelemetry.io/otel/trace"
)
// StartConfig defines configuration for a new span object.
type StartConfig struct {
	// spanOpts is forwarded verbatim to the tracer's Start call.
	spanOpts []trace.SpanStartOption
}

// SpanOpt is a functional option that mutates a StartConfig before a span is started.
type SpanOpt func(config *StartConfig)
// WithHTTPRequest marks span as a HTTP request operation from client to server.
// It'll append attributes from the HTTP request object and mark it with `SpanKindClient` type.
//
// Deprecated: use upstream functionality from otelhttp directly instead. This function is kept for API compatibility
// but no longer works as expected due to required functionality no longer exported in OpenTelemetry libraries.
func WithHTTPRequest(_ *http.Request) SpanOpt {
	return func(config *StartConfig) {
		// Only the span kind survives; request attributes are no longer added.
		config.spanOpts = append(config.spanOpts,
			trace.WithSpanKind(trace.SpanKindClient), // A client making a request to a server
		)
	}
}
// UpdateHTTPClient updates the http client with the necessary otel transport.
// The client's existing transport is wrapped so that every outgoing request
// produces a client span whose name is fixed to the supplied value.
func UpdateHTTPClient(client *http.Client, name string) {
	client.Transport = otelhttp.NewTransport(
		client.Transport,
		otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
			// Ignore the operation/request; all spans share one name.
			return name
		}),
	)
}
// StartSpan starts child span in a context.
// Options are applied to a StartConfig before the span is created. If the
// context already carries a valid span, the child is created by that span's
// tracer provider; otherwise the global provider is used.
func StartSpan(ctx context.Context, opName string, opts ...SpanOpt) (context.Context, *Span) {
	cfg := StartConfig{}
	for _, apply := range opts {
		apply(&cfg)
	}

	// Prefer the parent span's provider so the child is recorded alongside it.
	tracer := otel.Tracer("")
	if parent := trace.SpanFromContext(ctx); parent != nil && parent.SpanContext().IsValid() {
		tracer = parent.TracerProvider().Tracer("")
	}

	ctx, otelSpan := tracer.Start(ctx, opName, cfg.spanOpts...)
	return ctx, &Span{otelSpan: otelSpan}
}
// SpanFromContext returns the current Span from the context, wrapping the
// underlying otel span (which may be a no-op span if none is active).
func SpanFromContext(ctx context.Context) *Span {
	wrapped := &Span{otelSpan: trace.SpanFromContext(ctx)}
	return wrapped
}
// Span is wrapper around otel trace.Span.
// Span is the individual component of a trace. It represents a
// single named and timed operation of a workflow that is traced.
type Span struct {
	// otelSpan is the wrapped OpenTelemetry span; all methods delegate to it.
	otelSpan trace.Span
}
// End completes the span. It delegates to the underlying otel span's End.
func (s *Span) End() {
	s.otelSpan.End()
}
// AddEvent adds an event with provided name and options to the underlying span.
func (s *Span) AddEvent(name string, options ...trace.EventOption) {
	s.otelSpan.AddEvent(name, options...)
}
// SetStatus sets the status of the current span.
// A nil error marks the span Ok; a non-nil error is recorded on the span
// and the status is set to Error with the error's message.
func (s *Span) SetStatus(err error) {
	if err == nil {
		s.otelSpan.SetStatus(codes.Ok, "")
		return
	}
	s.otelSpan.RecordError(err)
	s.otelSpan.SetStatus(codes.Error, err.Error())
}
// SetAttributes sets kv as attributes of the span, delegating to the
// underlying otel span.
func (s *Span) SetAttributes(kv ...attribute.KeyValue) {
	s.otelSpan.SetAttributes(kv...)
}
// Name sets the span name by joining a list of strings in dot separated format.
func Name(names ...string) string {
	return makeSpanName(names...)
}
// Attribute takes a key value pair and returns attribute.KeyValue type.
// Typing rules (bool/int/float/string and slices thereof, then fallbacks)
// are implemented by the package-level any() helper.
func Attribute(k string, v interface{}) attribute.KeyValue {
	return any(k, v)
}
// HTTPStatusCodeAttributes generates attributes of the HTTP namespace as specified by the OpenTelemetry
// specification for a span. It returns a single-element slice holding the
// semconv HTTP status-code attribute for the given code.
func HTTPStatusCodeAttributes(code int) []attribute.KeyValue {
	attrs := make([]attribute.KeyValue, 0, 1)
	attrs = append(attrs, semconv.HTTPStatusCodeKey.Int(code))
	return attrs
}

@ -1195,6 +1195,7 @@ definitions:
- "default"
- "process"
- "hyperv"
- ""
MaskedPaths:
type: "array"
description: |
@ -4180,6 +4181,7 @@ definitions:
- "default"
- "process"
- "hyperv"
- ""
Init:
description: |
Run an init inside the container that forwards signals and reaps
@ -5750,6 +5752,7 @@ definitions:
- "default"
- "hyperv"
- "process"
- ""
InitBinary:
description: |
Name and, optional, path of the `docker-init` binary.
@ -5820,8 +5823,6 @@ definitions:
type: "string"
example:
- "WARNING: No memory limit support"
- "WARNING: bridge-nf-call-iptables is disabled"
- "WARNING: bridge-nf-call-ip6tables is disabled"
CDISpecDirs:
description: |
List of directories where (Container Device Interface) CDI
@ -7876,10 +7877,12 @@ paths:
type: "string"
- name: "h"
in: "query"
required: true
description: "Height of the TTY session in characters"
type: "integer"
- name: "w"
in: "query"
required: true
description: "Width of the TTY session in characters"
type: "integer"
tags: ["Container"]
@ -9244,6 +9247,19 @@ paths:
all tags of the given image that are present in the local image store
are pushed.
type: "string"
- name: "platform"
type: "string"
in: "query"
description: |
JSON-encoded OCI platform to select the platform-variant to push.
If not provided, all available variants will attempt to be pushed.
If the daemon provides a multi-platform image store, this selects
the platform-variant to push to the registry. If the image is
a single-platform image, or if the multi-platform image does not
provide a variant matching the given platform, an error is returned.
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
- name: "X-Registry-Auth"
in: "header"
description: |
@ -9253,11 +9269,6 @@ paths:
details.
type: "string"
required: true
- name: "platform"
in: "query"
description: "Select a platform-specific manifest to be pushed. OCI platform (JSON encoded)"
type: "string"
x-nullable: true
tags: ["Image"]
/images/{name}/tag:
post:
@ -9553,7 +9564,7 @@ paths:
type: "string"
example: "OK"
headers:
API-Version:
Api-Version:
type: "string"
description: "Max API Version the server supports"
Builder-Version:
@ -9609,7 +9620,7 @@ paths:
type: "string"
example: "(empty)"
headers:
API-Version:
Api-Version:
type: "string"
description: "Max API Version the server supports"
Builder-Version:
@ -10203,10 +10214,12 @@ paths:
type: "string"
- name: "h"
in: "query"
required: true
description: "Height of the TTY session in characters"
type: "integer"
- name: "w"
in: "query"
required: true
description: "Width of the TTY session in characters"
type: "integer"
tags: ["Exec"]
@ -11622,6 +11635,7 @@ paths:
example:
ListenAddr: "0.0.0.0:2377"
AdvertiseAddr: "192.168.1.1:2377"
DataPathAddr: "192.168.1.1"
RemoteAddrs:
- "node1:2377"
JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"

@ -10,7 +10,7 @@ import (
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
units "github.com/docker/go-units"
"github.com/docker/go-units"
)
// CgroupnsMode represents the cgroup namespace mode of the container

@ -484,4 +484,6 @@ type BuildCachePruneOptions struct {
All bool
KeepStorage int64
Filters filters.Args
// FIXME(thaJeztah): add new options; see https://github.com/moby/moby/issues/48639
}

@ -2,7 +2,7 @@
Package client is a Go client for the Docker Engine API.
For more information about the Engine API, see the documentation:
https://docs.docker.com/engine/api/
https://docs.docker.com/reference/api/engine/
# Usage
@ -247,6 +247,14 @@ func (cli *Client) tlsConfig() *tls.Config {
func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) {
transport := &http.Transport{}
// Necessary to prevent long-lived processes using the
// client from leaking connections due to idle connections
// not being released.
// TODO: see if we can also address this from the server side,
// or in go-connections.
// see: https://github.com/moby/moby/issues/45539
transport.MaxIdleConns = 6
transport.IdleConnTimeout = 30 * time.Second
err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
if err != nil {
return nil, err

@ -56,8 +56,8 @@ func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) {
err := cli.checkResponseErr(resp)
return ping, errdefs.FromStatusCode(err, resp.statusCode)
}
ping.APIVersion = resp.header.Get("API-Version")
ping.OSType = resp.header.Get("OSType")
ping.APIVersion = resp.header.Get("Api-Version")
ping.OSType = resp.header.Get("Ostype")
if resp.header.Get("Docker-Experimental") == "true" {
ping.Experimental = true
}

@ -87,7 +87,7 @@ type pluginAdapterWithRead struct {
*pluginAdapter
}
func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher {
func (a *pluginAdapterWithRead) ReadLogs(ctx context.Context, config ReadConfig) *LogWatcher {
watcher := NewLogWatcher()
go func() {
@ -101,6 +101,10 @@ func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher {
dec := logdriver.NewLogEntryDecoder(stream)
for {
if ctx.Err() != nil {
return
}
var buf logdriver.LogEntry
if err := dec.Decode(&buf); err != nil {
if err == io.EOF {
@ -127,6 +131,8 @@ func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher {
// send the message unless the consumer is gone
select {
case watcher.Msg <- msg:
case <-ctx.Done():
return
case <-watcher.WatchConsumerGone():
return
}

@ -7,7 +7,7 @@ import (
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/plugingetter"
units "github.com/docker/go-units"
"github.com/docker/go-units"
"github.com/pkg/errors"
)

@ -13,7 +13,7 @@ import (
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog"
"github.com/docker/docker/daemon/logger/loggerutils"
units "github.com/docker/go-units"
"github.com/docker/go-units"
"github.com/pkg/errors"
)

@ -12,10 +12,12 @@ import (
"github.com/docker/docker/pkg/tailfile"
)
var _ logger.LogReader = (*JSONFileLogger)(nil)
// ReadLogs implements the logger's LogReader interface for the logs
// created by this driver.
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
return l.writer.ReadLogs(config)
func (l *JSONFileLogger) ReadLogs(ctx context.Context, config logger.ReadConfig) *logger.LogWatcher {
return l.writer.ReadLogs(ctx, config)
}
func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
@ -79,6 +81,6 @@ func decodeFunc(rdr io.Reader) loggerutils.Decoder {
}
}
func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) {
func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (loggerutils.SizeReaderAt, int, error) {
return tailfile.NewTailReader(ctx, r, req)
}

@ -8,6 +8,7 @@
package logger // import "github.com/docker/docker/daemon/logger"
import (
"context"
"sync"
"time"
@ -88,7 +89,7 @@ type ReadConfig struct {
// LogReader is the interface for reading log messages for loggers that support reading.
type LogReader interface {
// ReadLogs reads logs from underlying logging backend.
ReadLogs(ReadConfig) *LogWatcher
ReadLogs(context.Context, ReadConfig) *LogWatcher
}
// LogWatcher is used when consuming logs read from the LogReader interface.

@ -22,8 +22,8 @@ type follow struct {
}
// Do follows the log file as it is written, starting from f at read.
func (fl *follow) Do(f *os.File, read logPos) {
fl.log = log.G(context.TODO()).WithFields(log.Fields{
func (fl *follow) Do(ctx context.Context, f *os.File, read logPos) {
fl.log = log.G(ctx).WithFields(log.Fields{
"module": "logger",
"file": f.Name(),
})
@ -38,7 +38,7 @@ func (fl *follow) Do(f *os.File, read logPos) {
}()
for {
wrote, ok := fl.nextPos(read)
wrote, ok := fl.nextPos(ctx, read)
if !ok {
return
}
@ -49,7 +49,7 @@ func (fl *follow) Do(f *os.File, read logPos) {
fl.Watcher.Err <- err
return
}
if !fl.forward(f) {
if !fl.forward(ctx, f) {
return
}
@ -91,7 +91,7 @@ func (fl *follow) Do(f *os.File, read logPos) {
read.size = 0
}
if !fl.forward(io.NewSectionReader(f, read.size, wrote.size-read.size)) {
if !fl.forward(ctx, io.NewSectionReader(f, read.size, wrote.size-read.size)) {
return
}
read = wrote
@ -100,9 +100,11 @@ func (fl *follow) Do(f *os.File, read logPos) {
// nextPos waits until the write position of the LogFile being followed has
// advanced from current and returns the new position.
func (fl *follow) nextPos(current logPos) (next logPos, ok bool) {
func (fl *follow) nextPos(ctx context.Context, current logPos) (next logPos, ok bool) {
var st logReadState
select {
case <-ctx.Done():
return current, false
case <-fl.Watcher.WatchConsumerGone():
return current, false
case st = <-fl.LogFile.read:
@ -135,7 +137,7 @@ func (fl *follow) nextPos(current logPos) (next logPos, ok bool) {
// forward decodes log messages from r and forwards them to the log watcher.
//
// The return value, cont, signals whether following should continue.
func (fl *follow) forward(r io.Reader) (cont bool) {
func (fl *follow) forward(ctx context.Context, r io.Reader) (cont bool) {
fl.Decoder.Reset(r)
return fl.Forwarder.Do(fl.Watcher, fl.Decoder)
return fl.Forwarder.Do(ctx, fl.Watcher, fl.Decoder.Decode)
}

@ -1,3 +1,6 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.22
package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils"
import (
@ -9,14 +12,18 @@ import (
"io/fs"
"math"
"os"
"slices"
"strconv"
"sync"
"time"
"github.com/containerd/containerd/tracing"
"github.com/containerd/log"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/pkg/pools"
"github.com/pkg/errors"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// rotateFileMetadata is a metadata of the gzip header of the compressed log file
@ -107,16 +114,11 @@ type SizeReaderAt interface {
Size() int64
}
type readAtCloser interface {
io.ReaderAt
io.Closer
}
// GetTailReaderFunc is used to truncate a reader to only read as much as is required
// in order to get the passed in number of log lines.
// It returns the sectioned reader, the number of lines that the section reader
// contains, and any error that occurs.
type GetTailReaderFunc func(ctx context.Context, f SizeReaderAt, nLogLines int) (rdr io.Reader, nLines int, err error)
type GetTailReaderFunc func(ctx context.Context, f SizeReaderAt, nLogLines int) (rdr SizeReaderAt, nLines int, err error)
// NewLogFile creates new LogFile
func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, decodeFunc MakeDecoderFn, perms os.FileMode, getTailReader GetTailReaderFunc) (*LogFile, error) {
@ -377,7 +379,12 @@ func (w *LogFile) Close() error {
// ReadLogs decodes entries from log files.
//
// It is the caller's responsibility to call ConsumerGone on the LogWatcher.
func (w *LogFile) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
func (w *LogFile) ReadLogs(ctx context.Context, config logger.ReadConfig) *logger.LogWatcher {
ctx, span := tracing.StartSpan(ctx, "logger.LogFile.ReadLogs")
defer span.End()
span.SetAttributes(tracing.Attribute("config", config))
watcher := logger.NewLogWatcher()
// Lock out filesystem operations so that we can capture the read
// position and atomically open the corresponding log file, without the
@ -389,19 +396,104 @@ func (w *LogFile) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
st := <-w.read
pos := st.pos
w.read <- st
go w.readLogsLocked(pos, config, watcher)
go w.readLogsLocked(ctx, pos, config, watcher)
return watcher
}
// tailFiles must be called with w.fsopMu locked for reads.
// w.fsopMu.RUnlock() is called before returning.
func (w *LogFile) tailFiles(ctx context.Context, config logger.ReadConfig, watcher *logger.LogWatcher, current SizeReaderAt, dec Decoder, fwd *forwarder) (cont bool) {
if config.Tail == 0 {
w.fsopMu.RUnlock()
return true
}
ctx, span := tracing.StartSpan(ctx, "logger.Logfile.TailLogs")
defer func() {
span.SetAttributes(attribute.Bool("continue", cont))
span.End()
}()
files, err := w.openRotatedFiles(ctx, config)
w.fsopMu.RUnlock()
if err != nil {
// TODO: Should we allow this to continue (as in set `cont=true`) and not error out the log stream?
err = errors.Wrap(err, "error opening rotated log files")
span.SetStatus(err)
watcher.Err <- err
return false
}
if current.Size() > 0 {
files = append(files, &sizeReaderAtOpener{current, "current"})
}
return tailFiles(ctx, files, watcher, dec, w.getTailReader, config.Tail, fwd)
}
// sizeReaderAtOpener adapts an already-open SizeReaderAt (e.g. the section
// reader over the current log file) to the fileOpener interface used when
// tailing rotated log files.
type sizeReaderAtOpener struct {
	SizeReaderAt
	ref string // identifier reported by Ref, used in trace attributes
}

// ReaderAt returns the wrapped SizeReaderAt with a nil close function; the
// underlying reader's lifetime is managed by the caller, not this opener.
func (o *sizeReaderAtOpener) ReaderAt(context.Context) (sizeReaderAtCloser, error) {
	return &sizeReaderAtWithCloser{o, nil}, nil
}

// Close is a no-op: the wrapped reader is owned elsewhere.
func (o *sizeReaderAtOpener) Close() {}

// Ref returns the identifier for this reader.
func (o *sizeReaderAtOpener) Ref() string {
	return o.ref
}
// sizeReaderAtWithCloser pairs a SizeReaderAt with an optional close
// function. A nil SizeReaderAt represents an empty (zero-length) source:
// reads report io.EOF and Size reports 0.
type sizeReaderAtWithCloser struct {
	SizeReaderAt
	close func() error // optional; Close is a no-op when nil
}

// ReadAt delegates to the wrapped reader, or reports io.EOF when the
// wrapped reader is nil (empty source).
func (r *sizeReaderAtWithCloser) ReadAt(p []byte, offset int64) (int, error) {
	if r.SizeReaderAt == nil {
		return 0, io.EOF
	}
	return r.SizeReaderAt.ReadAt(p, offset)
}

// Read delegates to the wrapped reader, or reports io.EOF when the
// wrapped reader is nil (empty source).
func (r *sizeReaderAtWithCloser) Read(p []byte) (int, error) {
	if r.SizeReaderAt == nil {
		return 0, io.EOF
	}
	return r.SizeReaderAt.Read(p)
}

// Size reports the wrapped reader's size, or 0 when it is nil.
func (r *sizeReaderAtWithCloser) Size() int64 {
	if r.SizeReaderAt == nil {
		return 0
	}
	return r.SizeReaderAt.Size()
}

// Close invokes the optional close function, if any.
func (r *sizeReaderAtWithCloser) Close() error {
	if r.close != nil {
		return r.close()
	}
	return nil
}
// readLogsLocked is the bulk of the implementation of ReadLogs.
//
// w.fsopMu must be locked for reading when calling this method.
// w.fsopMu.RUnlock() is called before returning.
func (w *LogFile) readLogsLocked(currentPos logPos, config logger.ReadConfig, watcher *logger.LogWatcher) {
func (w *LogFile) readLogsLocked(ctx context.Context, currentPos logPos, config logger.ReadConfig, watcher *logger.LogWatcher) {
ctx, span := tracing.StartSpan(ctx, "logger.Logfile.ReadLogsLocked")
defer span.End()
defer close(watcher.Msg)
currentFile, err := open(w.f.Name())
if err != nil {
w.fsopMu.RUnlock()
span.SetStatus(err)
watcher.Err <- err
return
}
@ -410,53 +502,13 @@ func (w *LogFile) readLogsLocked(currentPos logPos, config logger.ReadConfig, wa
dec := w.createDecoder(nil)
defer dec.Close()
currentChunk := io.NewSectionReader(currentFile, 0, currentPos.size)
fwd := newForwarder(config)
if config.Tail != 0 {
// TODO(@cpuguy83): Instead of opening every file, only get the files which
// are needed to tail.
// This is especially costly when compression is enabled.
files, err := w.openRotatedFiles(config)
if err != nil {
watcher.Err <- err
return
}
closeFiles := func() {
for _, f := range files {
f.Close()
}
}
readers := make([]SizeReaderAt, 0, len(files)+1)
for _, f := range files {
switch ff := f.(type) {
case SizeReaderAt:
readers = append(readers, ff)
case interface{ Stat() (fs.FileInfo, error) }:
stat, err := ff.Stat()
if err != nil {
watcher.Err <- errors.Wrap(err, "error reading size of rotated file")
closeFiles()
return
}
readers = append(readers, io.NewSectionReader(f, 0, stat.Size()))
default:
panic(fmt.Errorf("rotated file value %#v (%[1]T) has neither Size() nor Stat() methods", f))
}
}
if currentChunk.Size() > 0 {
readers = append(readers, currentChunk)
}
// At this point, w.tailFiles is responsible for unlocking w.fsopmu
ok := w.tailFiles(ctx, config, watcher, io.NewSectionReader(currentFile, 0, currentPos.size), dec, fwd)
ok := tailFiles(readers, watcher, dec, w.getTailReader, config.Tail, fwd)
closeFiles()
if !ok {
return
}
} else {
w.fsopMu.RUnlock()
if !ok {
return
}
if !config.Follow {
@ -468,117 +520,265 @@ func (w *LogFile) readLogsLocked(currentPos logPos, config logger.ReadConfig, wa
Watcher: watcher,
Decoder: dec,
Forwarder: fwd,
}).Do(currentFile, currentPos)
}).Do(ctx, currentFile, currentPos)
}
// fileOpener lazily opens a log file (current, rotated, or compressed) for
// reading, deferring any expensive work (stat, decompression) until ReaderAt
// is actually called.
type fileOpener interface {
	// ReaderAt opens the underlying file for reading. The returned reader
	// must be closed by the caller.
	ReaderAt(context.Context) (ra sizeReaderAtCloser, err error)
	// Close releases the opener's own resources; readers already handed
	// out by ReaderAt are closed separately.
	Close()
	// Ref identifies the underlying file (e.g. its path) for tracing.
	Ref() string
}
// simpleFileOpener just holds a reference to an already open file
type simpleFileOpener struct {
	f *os.File
	sz int64 // cached file size; lazily populated on first ReaderAt call
	closed bool
}

// ReaderAt returns a section reader over the whole file. The size is read
// via Stat once and cached; a nil close function is used because the *os.File
// is closed by the opener's own Close, not per-reader.
func (o *simpleFileOpener) ReaderAt(context.Context) (sizeReaderAtCloser, error) {
	if o.closed {
		return nil, errors.New("file is closed")
	}

	if o.sz == 0 {
		// Size not cached yet (or file was empty on a prior call — a
		// zero-size file re-stats, which is harmless).
		stat, err := o.f.Stat()
		if err != nil {
			return nil, errors.Wrap(err, "error stating file")
		}
		o.sz = stat.Size()
	}
	return &sizeReaderAtWithCloser{io.NewSectionReader(o.f, 0, o.sz), nil}, nil
}

// Ref returns the file's path.
func (o *simpleFileOpener) Ref() string {
	return o.f.Name()
}

// Close closes the underlying file and marks the opener unusable.
func (o *simpleFileOpener) Close() {
	_ = o.f.Close()
	o.closed = true
}
// decompress is the converter function used by shareTempFileConverter: it
// rewinds src to the beginning, streams its gzip-decompressed contents into
// dst, and closes the gzip reader (surfacing any trailing checksum error).
func decompress(dst io.WriteSeeker, src io.ReadSeeker) error {
	_, err := src.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}

	gzr, err := gzip.NewReader(src)
	if err != nil {
		return err
	}

	if _, err = pools.Copy(dst, gzr); err != nil {
		return err
	}
	return gzr.Close()
}
// compressedFileOpener holds a reference to a compressed log file and will
// lazily open a decompressed version of the file.
type compressedFileOpener struct {
	closed bool
	f *os.File // the compressed (.gz) rotated log file
	lf *LogFile // parent LogFile; provides the shared decompress converter
	// ifBefore is the read window's lower bound (config.Since): if the
	// file's last entry predates it, decompression is skipped entirely.
	ifBefore time.Time
}
// ReaderAt lazily produces a reader over the decompressed contents of the
// compressed log file. If the gzip header metadata shows that every entry in
// the file predates cfo.ifBefore, decompression is skipped and an empty
// reader is returned instead.
func (cfo *compressedFileOpener) ReaderAt(ctx context.Context) (_ sizeReaderAtCloser, retErr error) {
	_, span := tracing.StartSpan(ctx, "logger.Logfile.Compressed.ReaderAt")
	defer func() {
		if retErr != nil {
			span.SetStatus(retErr)
		}
		span.End()
	}()

	span.SetAttributes(attribute.String("file", cfo.f.Name()))

	if cfo.closed {
		return nil, errors.New("compressed file closed")
	}

	gzr, err := gzip.NewReader(cfo.f)
	if err != nil {
		return nil, err
	}
	defer gzr.Close()

	// Extract the last log entry timestamp from the gzip header
	// Use this to determine if we even need to read this file based on inputs
	extra := &rotateFileMetadata{}
	err = json.Unmarshal(gzr.Header.Extra, extra)
	if err == nil && !extra.LastTime.IsZero() && extra.LastTime.Before(cfo.ifBefore) {
		// Whole file predates the requested window: return an empty
		// (nil-backed) reader so the caller reads zero entries.
		span.SetAttributes(attribute.Bool("skip", true))
		return &sizeReaderAtWithCloser{}, nil
	}
	if err == nil {
		span.SetAttributes(attribute.Stringer("lastLogTime", extra.LastTime))
	}

	span.AddEvent("Start decompress")

	// The decompress converter seeks cfo.f back to the start itself, so the
	// offset consumed by reading the gzip header above does not matter.
	return cfo.lf.decompress.Do(cfo.f)
}
// Close marks the opener closed and closes the underlying compressed file.
func (cfo *compressedFileOpener) Close() {
	cfo.closed = true
	cfo.f.Close()
}

// Ref returns the path of the compressed file.
func (cfo *compressedFileOpener) Ref() string {
	return cfo.f.Name()
}
// emptyFileOpener is a fileOpener for a rotated log file that does not exist;
// it always yields an empty reader.
type emptyFileOpener struct{}

func (emptyFileOpener) ReaderAt(context.Context) (sizeReaderAtCloser, error) {
	return &sizeReaderAtWithCloser{}, nil
}

// Close is a no-op: there is no underlying file.
func (emptyFileOpener) Close() {}

// Ref identifies this opener in trace attributes.
func (emptyFileOpener) Ref() string {
	return "null"
}
// openRotatedFiles returns a slice of files open for reading, in order from
// oldest to newest, and calls w.fsopMu.RUnlock() before returning.
//
// This method must only be called with w.fsopMu locked for reading.
func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files []readAtCloser, err error) {
type rotatedFile struct {
f *os.File
compressed bool
}
func (w *LogFile) openRotatedFiles(ctx context.Context, config logger.ReadConfig) (_ []fileOpener, retErr error) {
var out []fileOpener
var q []rotatedFile
defer func() {
if err != nil {
for _, qq := range q {
qq.f.Close()
}
for _, f := range files {
f.Close()
if retErr != nil {
for _, fo := range out {
fo.Close()
}
}
}()
q, err = func() (q []rotatedFile, err error) {
defer w.fsopMu.RUnlock()
for i := w.maxFiles; i > 1; i-- {
fo, err := w.openRotatedFile(ctx, i-1, config)
if err != nil {
return nil, err
}
out = append(out, fo)
}
q = make([]rotatedFile, 0, w.maxFiles)
for i := w.maxFiles; i > 1; i-- {
var f rotatedFile
f.f, err = open(fmt.Sprintf("%s.%d", w.f.Name(), i-1))
if err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return nil, errors.Wrap(err, "error opening rotated log file")
}
f.compressed = true
f.f, err = open(fmt.Sprintf("%s.%d.gz", w.f.Name(), i-1))
if err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return nil, errors.Wrap(err, "error opening file for decompression")
}
continue
return out, nil
}
// openRotatedFile returns a fileOpener for rotated log file number i. It
// first tries the plain rotated file ("<name>.<i>"), then the gzip-compressed
// variant ("<name>.<i>.gz"); when neither exists it returns an opener that
// yields an empty reader. Errors other than non-existence are reported.
func (w *LogFile) openRotatedFile(ctx context.Context, i int, config logger.ReadConfig) (fileOpener, error) {
	base := w.f.Name()

	plain, err := open(fmt.Sprintf("%s.%d", base, i))
	switch {
	case err == nil:
		return &simpleFileOpener{f: plain}, nil
	case !errors.Is(err, fs.ErrNotExist):
		return nil, errors.Wrap(err, "error opening rotated log file")
	}

	gz, err := open(fmt.Sprintf("%s.%d.gz", base, i))
	switch {
	case err == nil:
		return &compressedFileOpener{f: gz, lf: w, ifBefore: config.Since}, nil
	case !errors.Is(err, fs.ErrNotExist):
		return nil, errors.Wrap(err, "error opening file for decompression")
	}

	// Neither the plain nor the compressed rotated file exists.
	return &emptyFileOpener{}, nil
}
// sizeReaderAtCloser is used to improve type safety around tailing logs.
// Some log readers require the log file to be closed, so this ensures every
// implementation carries a Close method — even if it is a no-op — rather
// than relying on runtime type assertions.
type sizeReaderAtCloser interface {
	SizeReaderAt
	io.Closer
}
func getTailFiles(ctx context.Context, files []fileOpener, nLines int, getTailReader GetTailReaderFunc) (_ []sizeReaderAtCloser, retErr error) {
ctx, span := tracing.StartSpan(ctx, "logger.Logfile.CollectTailFiles")
span.SetAttributes(attribute.Int("requested_lines", nLines))
defer func() {
if retErr != nil {
span.SetStatus(retErr)
}
span.End()
}()
out := make([]sizeReaderAtCloser, 0, len(files))
defer func() {
if retErr != nil {
for _, ra := range out {
if err := ra.Close(); err != nil {
log.G(ctx).WithError(err).Warn("Error closing log reader")
}
}
q = append(q, f)
}
return q, nil
}()
if err != nil {
return nil, err
}
for len(q) > 0 {
qq := q[0]
q = q[1:]
if qq.compressed {
defer qq.f.Close()
f, err := w.maybeDecompressFile(qq.f, config)
if nLines <= 0 {
for _, fo := range files {
span.AddEvent("Open file", trace.WithAttributes(attribute.String("file", fo.Ref())))
ra, err := fo.ReaderAt(ctx)
if err != nil {
return nil, err
}
if f != nil {
// The log before `config.Since` does not need to read
files = append(files, f)
}
} else {
files = append(files, qq.f)
out = append(out, ra)
}
return out, nil
}
return files, nil
}
func (w *LogFile) maybeDecompressFile(cf *os.File, config logger.ReadConfig) (readAtCloser, error) {
rc, err := gzip.NewReader(cf)
if err != nil {
return nil, errors.Wrap(err, "error making gzip reader for compressed log file")
}
defer rc.Close()
for i := len(files) - 1; i >= 0 && nLines > 0; i-- {
if err := ctx.Err(); err != nil {
return nil, errors.Wrap(err, "stopping parsing files to tail due to error")
}
// Extract the last log entry timestamp from the gzip header
extra := &rotateFileMetadata{}
err = json.Unmarshal(rc.Header.Extra, extra)
if err == nil && !extra.LastTime.IsZero() && extra.LastTime.Before(config.Since) {
return nil, nil
}
tmpf, err := w.decompress.Do(cf)
return tmpf, errors.Wrap(err, "error decompressing log file")
}
fo := files[i]
func decompress(dst io.WriteSeeker, src io.ReadSeeker) error {
if _, err := src.Seek(0, io.SeekStart); err != nil {
return err
}
rc, err := gzip.NewReader(src)
if err != nil {
return err
}
_, err = pools.Copy(dst, rc)
if err != nil {
return err
fileAttr := attribute.String("file", fo.Ref())
span.AddEvent("Open file", trace.WithAttributes(fileAttr))
ra, err := fo.ReaderAt(ctx)
if err != nil {
return nil, err
}
span.AddEvent("Scan file to tail", trace.WithAttributes(fileAttr, attribute.Int("remaining_lines", nLines)))
tail, n, err := getTailReader(ctx, ra, nLines)
if err != nil {
ra.Close()
log.G(ctx).WithError(err).Warn("Error scanning log file for tail file request, skipping")
continue
}
nLines -= n
out = append(out, &sizeReaderAtWithCloser{tail, ra.Close})
}
return rc.Close()
slices.Reverse(out)
return out, nil
}
func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, getTailReader GetTailReaderFunc, nLines int, fwd *forwarder) (cont bool) {
ctx, cancel := context.WithCancel(context.Background())
func tailFiles(ctx context.Context, files []fileOpener, watcher *logger.LogWatcher, dec Decoder, getTailReader GetTailReaderFunc, nLines int, fwd *forwarder) (cont bool) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
cont = true
// TODO(@cpuguy83): we should plumb a context through instead of dealing with `WatchClose()` here.
go func() {
select {
case <-ctx.Done():
@ -587,27 +787,64 @@ func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, ge
}
}()
readers := make([]io.Reader, 0, len(files))
readers, err := getTailFiles(ctx, files, nLines, getTailReader)
if err != nil {
watcher.Err <- err
return false
}
if nLines > 0 {
for i := len(files) - 1; i >= 0 && nLines > 0; i-- {
tail, n, err := getTailReader(ctx, files[i], nLines)
if err != nil {
watcher.Err <- errors.Wrap(err, "error finding file position to start log tailing")
return false
var idx int
defer func() {
// Make sure all are released if there is an early return.
if !cont {
for _, r := range readers[idx:] {
if err := r.Close(); err != nil {
log.G(ctx).WithError(err).Debug("Error closing log reader")
}
}
nLines -= n
readers = append([]io.Reader{tail}, readers...)
}
} else {
for _, r := range files {
readers = append(readers, r)
}()
for _, ra := range readers {
ra := ra
select {
case <-watcher.WatchConsumerGone():
return false
case <-ctx.Done():
return false
default:
}
dec.Reset(ra)
cancel := context.AfterFunc(ctx, func() {
if err := ra.Close(); err != nil {
log.G(ctx).WithError(err).Debug("Error closing log reader")
}
})
ok := fwd.Do(ctx, watcher, func() (*logger.Message, error) {
msg, err := dec.Decode()
if err != nil && !errors.Is(err, io.EOF) {
// We have an error decoding the stream, but we don't want to error out
// the whole log reader.
// If we return anything other than EOF then the forwarder will return
// false and we'll exit the loop.
// Instead just log the error here and return an EOF so we can move to
// the next file.
log.G(ctx).WithError(err).Warn("Error decoding log file")
return nil, io.EOF
}
return msg, err
})
cancel()
idx++
if !ok {
return false
}
}
rdr := io.MultiReader(readers...)
dec.Reset(rdr)
return fwd.Do(watcher, dec)
return true
}
type forwarder struct {
@ -622,16 +859,35 @@ func newForwarder(config logger.ReadConfig) *forwarder {
// conditions to watcher. Do returns cont=true iff it has read all messages from
// dec without encountering a message with a timestamp which is after the
// configured until time.
func (fwd *forwarder) Do(watcher *logger.LogWatcher, dec Decoder) (cont bool) {
func (fwd *forwarder) Do(ctx context.Context, watcher *logger.LogWatcher, next func() (*logger.Message, error)) (cont bool) {
ctx, span := tracing.StartSpan(ctx, "logger.Logfile.Forward")
defer func() {
span.SetAttributes(attribute.Bool("continue", cont))
span.End()
}()
for {
msg, err := dec.Decode()
select {
case <-watcher.WatchConsumerGone():
span.AddEvent("watch consumer gone")
return false
case <-ctx.Done():
span.AddEvent(ctx.Err().Error())
return false
default:
}
msg, err := next()
if err != nil {
if errors.Is(err, io.EOF) {
span.AddEvent("EOF")
return true
}
watcher.Err <- err
span.SetStatus(err)
log.G(ctx).WithError(err).Debug("Error while decoding log entry, not continuing")
return false
}
if !fwd.since.IsZero() {
if msg.Timestamp.Before(fwd.since) {
continue
@ -643,10 +899,16 @@ func (fwd *forwarder) Do(watcher *logger.LogWatcher, dec Decoder) (cont bool) {
fwd.since = time.Time{}
}
if !fwd.until.IsZero() && msg.Timestamp.After(fwd.until) {
log.G(ctx).Debug("Log is newer than requested window, skipping remaining logs")
return false
}
select {
case <-ctx.Done():
span.AddEvent(ctx.Err().Error())
return false
case <-watcher.WatchConsumerGone():
span.AddEvent("watch consumer gone")
return false
case watcher.Msg <- msg:
}

@ -76,7 +76,7 @@ func (c *sharedTempFileConverter) Do(f *os.File) (*sharedFileReader, error) {
// ModTime, which conveniently also handles the case of true
// positives where the file has also been modified since it was
// first converted.
if os.SameFile(tf.src, stat) && tf.src.ModTime() == stat.ModTime() {
if os.SameFile(tf.src, stat) && tf.src.ModTime().Equal(stat.ModTime()) {
return c.openExisting(st, id, tf)
}
}

@ -1,6 +1,7 @@
package logger // import "github.com/docker/docker/daemon/logger"
import (
"context"
"errors"
"sync"
"sync/atomic"
@ -20,19 +21,22 @@ type RingLogger struct {
wg sync.WaitGroup
}
var _ SizedLogger = &RingLogger{}
var (
_ SizedLogger = (*RingLogger)(nil)
_ LogReader = (*ringWithReader)(nil)
)
type ringWithReader struct {
*RingLogger
}
func (r *ringWithReader) ReadLogs(cfg ReadConfig) *LogWatcher {
func (r *ringWithReader) ReadLogs(ctx context.Context, cfg ReadConfig) *LogWatcher {
reader, ok := r.l.(LogReader)
if !ok {
// something is wrong if we get here
panic("expected log reader")
}
return reader.ReadLogs(cfg)
return reader.ReadLogs(ctx, cfg)
}
func newRingLogger(driver Logger, logInfo Info, maxSize int64) *RingLogger {

@ -18,6 +18,8 @@ const blockThreshold = 1e6
var (
// ErrClosed is returned when Write is called on a closed BytesPipe.
//
// Deprecated: this type is only used internally, and will be removed in the next release.
ErrClosed = errors.New("write to closed BytesPipe")
bufPools = make(map[int]*sync.Pool)
@ -28,6 +30,8 @@ var (
// All written data may be read at most once. Also, BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't be overgrown after peak loads.
//
// Deprecated: this type is only used internally, and will be removed in the next release.
type BytesPipe struct {
mu sync.Mutex
wait *sync.Cond
@ -40,6 +44,8 @@ type BytesPipe struct {
// NewBytesPipe creates new BytesPipe, initialized by specified slice.
// If buf is nil, then it will be initialized with slice which cap is 64.
// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
//
// Deprecated: this function is only used internally, and will be removed in the next release.
func NewBytesPipe() *BytesPipe {
bp := &BytesPipe{}
bp.buf = append(bp.buf, getBuffer(minCap))

@ -80,13 +80,19 @@ func (wf *WriteFlusher) Close() error {
return nil
}
// nopFlusher is a flusher whose flush operation is a no-op, used when the
// wrapped writer does not implement flushing itself.
type nopFlusher struct{}

// Flush does nothing.
func (f *nopFlusher) Flush() {}
// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var fl flusher
if f, ok := w.(flusher); ok {
fl = f
} else {
fl = &NopFlusher{}
fl = &nopFlusher{}
}
return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
}

@ -6,6 +6,8 @@ import (
)
// NopWriter represents a type which write operation is nop.
//
// Deprecated: use [io.Discard] instead. This type will be removed in the next release.
type NopWriter struct{}
func (*NopWriter) Write(buf []byte) (int, error) {
@ -19,15 +21,16 @@ type nopWriteCloser struct {
func (w *nopWriteCloser) Close() error { return nil }
// NopWriteCloser returns a nopWriteCloser.
//
// Deprecated: This function is no longer used and will be removed in the next release.
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
// NopFlusher represents a type which flush operation is nop.
type NopFlusher struct{}
// Flush is a nop operation.
func (f *NopFlusher) Flush() {}
//
// Deprecated: NopFlusher is only used internally and will be removed in the next release.
type NopFlusher = nopFlusher
type writeCloserWrapper struct {
io.Writer
@ -55,12 +58,16 @@ func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
// of bytes written to the writer during a "session".
// This can be convenient when write return is masked
// (e.g., json.Encoder.Encode())
//
// Deprecated: this type is no longer used and will be removed in the next release.
type WriteCounter struct {
Count int64
Writer io.Writer
}
// NewWriteCounter returns a new WriteCounter.
//
// Deprecated: this function is no longer used and will be removed in the next release.
func NewWriteCounter(w io.Writer) *WriteCounter {
return &WriteCounter{
Writer: w,

@ -7,7 +7,7 @@ import (
"strings"
"time"
units "github.com/docker/go-units"
"github.com/docker/go-units"
"github.com/moby/term"
"github.com/morikuni/aec"
)

@ -48,7 +48,7 @@ type SizeReaderAt interface {
}
// NewTailReader scopes the passed in reader to just the last N lines passed in
func NewTailReader(ctx context.Context, r SizeReaderAt, reqLines int) (io.Reader, int, error) {
func NewTailReader(ctx context.Context, r SizeReaderAt, reqLines int) (*io.SectionReader, int, error) {
return NewTailReaderWithDelimiter(ctx, r, reqLines, eol)
}
@ -56,7 +56,7 @@ func NewTailReader(ctx context.Context, r SizeReaderAt, reqLines int) (io.Reader
// In this case a "line" is defined by the passed in delimiter.
//
// Delimiter lengths should be generally small, no more than 12 bytes
func NewTailReaderWithDelimiter(ctx context.Context, r SizeReaderAt, reqLines int, delimiter []byte) (io.Reader, int, error) {
func NewTailReaderWithDelimiter(ctx context.Context, r SizeReaderAt, reqLines int, delimiter []byte) (*io.SectionReader, int, error) {
if reqLines < 1 {
return nil, 0, ErrNonPositiveLinesNumber
}
@ -71,7 +71,7 @@ func NewTailReaderWithDelimiter(ctx context.Context, r SizeReaderAt, reqLines in
)
if int64(len(delimiter)) >= size {
return bytes.NewReader(nil), 0, nil
return io.NewSectionReader(bytes.NewReader(nil), 0, 0), 0, nil
}
scanner := newScanner(r, delimiter)
@ -92,7 +92,7 @@ func NewTailReaderWithDelimiter(ctx context.Context, r SizeReaderAt, reqLines in
tailStart = scanner.Start(ctx)
if found == 0 {
return bytes.NewReader(nil), 0, nil
return io.NewSectionReader(bytes.NewReader(nil), 0, 0), 0, nil
}
if found < reqLines && tailStart != 0 {

@ -533,6 +533,9 @@ github.com/cncf/xds/go/xds/data/orca/v3
github.com/cncf/xds/go/xds/service/orca/v3
github.com/cncf/xds/go/xds/type/matcher/v3
github.com/cncf/xds/go/xds/type/v3
# github.com/containerd/containerd v1.7.25
## explicit; go 1.21
github.com/containerd/containerd/tracing
# github.com/containerd/fifo v1.1.0
## explicit; go 1.18
github.com/containerd/fifo
@ -583,7 +586,7 @@ github.com/distribution/reference
## explicit; go 1.13
github.com/dlclark/regexp2
github.com/dlclark/regexp2/syntax
# github.com/docker/docker v27.3.1+incompatible
# github.com/docker/docker v27.5.0+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types

Loading…
Cancel
Save