mirror of https://github.com/grafana/loki
fix(deps): update module k8s.io/apimachinery to v0.33.0 (main) (#17429)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>pull/17430/head
parent
82acbd58b2
commit
0851b6fa08
@ -1,180 +0,0 @@ |
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes |
||||
|
||||
import ( |
||||
"fmt" |
||||
"strings" |
||||
|
||||
"github.com/golang/protobuf/proto" |
||||
"google.golang.org/protobuf/reflect/protoreflect" |
||||
"google.golang.org/protobuf/reflect/protoregistry" |
||||
|
||||
anypb "github.com/golang/protobuf/ptypes/any" |
||||
) |
||||
|
||||
const urlPrefix = "type.googleapis.com/" |
||||
|
||||
// AnyMessageName returns the message name contained in an anypb.Any message.
|
||||
// Most type assertions should use the Is function instead.
|
||||
//
|
||||
// Deprecated: Call the any.MessageName method instead.
|
||||
func AnyMessageName(any *anypb.Any) (string, error) { |
||||
name, err := anyMessageName(any) |
||||
return string(name), err |
||||
} |
||||
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { |
||||
if any == nil { |
||||
return "", fmt.Errorf("message is nil") |
||||
} |
||||
name := protoreflect.FullName(any.TypeUrl) |
||||
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { |
||||
name = name[i+len("/"):] |
||||
} |
||||
if !name.IsValid() { |
||||
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) |
||||
} |
||||
return name, nil |
||||
} |
||||
|
||||
// MarshalAny marshals the given message m into an anypb.Any message.
|
||||
//
|
||||
// Deprecated: Call the anypb.New function instead.
|
||||
func MarshalAny(m proto.Message) (*anypb.Any, error) { |
||||
switch dm := m.(type) { |
||||
case DynamicAny: |
||||
m = dm.Message |
||||
case *DynamicAny: |
||||
if dm == nil { |
||||
return nil, proto.ErrNil |
||||
} |
||||
m = dm.Message |
||||
} |
||||
b, err := proto.Marshal(m) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil |
||||
} |
||||
|
||||
// Empty returns a new message of the type specified in an anypb.Any message.
|
||||
// It returns protoregistry.NotFound if the corresponding message type could not
|
||||
// be resolved in the global registry.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
|
||||
// to resolve the message name and create a new instance of it.
|
||||
func Empty(any *anypb.Any) (proto.Message, error) { |
||||
name, err := anyMessageName(any) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
mt, err := protoregistry.GlobalTypes.FindMessageByName(name) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return proto.MessageV1(mt.New().Interface()), nil |
||||
} |
||||
|
||||
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
|
||||
// into the provided message m. It returns an error if the target message
|
||||
// does not match the type in the Any message or if an unmarshal error occurs.
|
||||
//
|
||||
// The target message m may be a *DynamicAny message. If the underlying message
|
||||
// type could not be resolved, then this returns protoregistry.NotFound.
|
||||
//
|
||||
// Deprecated: Call the any.UnmarshalTo method instead.
|
||||
func UnmarshalAny(any *anypb.Any, m proto.Message) error { |
||||
if dm, ok := m.(*DynamicAny); ok { |
||||
if dm.Message == nil { |
||||
var err error |
||||
dm.Message, err = Empty(any) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
m = dm.Message |
||||
} |
||||
|
||||
anyName, err := AnyMessageName(any) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
msgName := proto.MessageName(m) |
||||
if anyName != msgName { |
||||
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) |
||||
} |
||||
return proto.Unmarshal(any.Value, m) |
||||
} |
||||
|
||||
// Is reports whether the Any message contains a message of the specified type.
|
||||
//
|
||||
// Deprecated: Call the any.MessageIs method instead.
|
||||
func Is(any *anypb.Any, m proto.Message) bool { |
||||
if any == nil || m == nil { |
||||
return false |
||||
} |
||||
name := proto.MessageName(m) |
||||
if !strings.HasSuffix(any.TypeUrl, name) { |
||||
return false |
||||
} |
||||
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' |
||||
} |
||||
|
||||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||
// allocate a proto.Message for the type specified in an anypb.Any message.
|
||||
// The allocated message is stored in the embedded proto.Message.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var x ptypes.DynamicAny
|
||||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||
//
|
||||
// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
|
||||
// the any message contents into a new instance of the underlying message.
|
||||
type DynamicAny struct{ proto.Message } |
||||
|
||||
func (m DynamicAny) String() string { |
||||
if m.Message == nil { |
||||
return "<nil>" |
||||
} |
||||
return m.Message.String() |
||||
} |
||||
func (m DynamicAny) Reset() { |
||||
if m.Message == nil { |
||||
return |
||||
} |
||||
m.Message.Reset() |
||||
} |
||||
func (m DynamicAny) ProtoMessage() { |
||||
return |
||||
} |
||||
func (m DynamicAny) ProtoReflect() protoreflect.Message { |
||||
if m.Message == nil { |
||||
return nil |
||||
} |
||||
return dynamicAny{proto.MessageReflect(m.Message)} |
||||
} |
||||
|
||||
type dynamicAny struct{ protoreflect.Message } |
||||
|
||||
func (m dynamicAny) Type() protoreflect.MessageType { |
||||
return dynamicAnyType{m.Message.Type()} |
||||
} |
||||
func (m dynamicAny) New() protoreflect.Message { |
||||
return dynamicAnyType{m.Message.Type()}.New() |
||||
} |
||||
func (m dynamicAny) Interface() protoreflect.ProtoMessage { |
||||
return DynamicAny{proto.MessageV1(m.Message.Interface())} |
||||
} |
||||
|
||||
type dynamicAnyType struct{ protoreflect.MessageType } |
||||
|
||||
func (t dynamicAnyType) New() protoreflect.Message { |
||||
return dynamicAny{t.MessageType.New()} |
||||
} |
||||
func (t dynamicAnyType) Zero() protoreflect.Message { |
||||
return dynamicAny{t.MessageType.Zero()} |
||||
} |
||||
@ -1,10 +0,0 @@ |
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ptypes provides functionality for interacting with well-known types.
|
||||
//
|
||||
// Deprecated: Well-known types have specialized functionality directly
|
||||
// injected into the generated packages for each message type.
|
||||
// See the deprecation notice for each function for the suggested alternative.
|
||||
package ptypes |
||||
@ -1,76 +0,0 @@ |
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"time" |
||||
|
||||
durationpb "github.com/golang/protobuf/ptypes/duration" |
||||
) |
||||
|
||||
// Range of google.protobuf.Duration as specified in duration.proto.
|
||||
// This is about 10,000 years in seconds.
|
||||
const ( |
||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) |
||||
minSeconds = -maxSeconds |
||||
) |
||||
|
||||
// Duration converts a durationpb.Duration to a time.Duration.
|
||||
// Duration returns an error if dur is invalid or overflows a time.Duration.
|
||||
//
|
||||
// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
|
||||
func Duration(dur *durationpb.Duration) (time.Duration, error) { |
||||
if err := validateDuration(dur); err != nil { |
||||
return 0, err |
||||
} |
||||
d := time.Duration(dur.Seconds) * time.Second |
||||
if int64(d/time.Second) != dur.Seconds { |
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) |
||||
} |
||||
if dur.Nanos != 0 { |
||||
d += time.Duration(dur.Nanos) * time.Nanosecond |
||||
if (d < 0) != (dur.Nanos < 0) { |
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) |
||||
} |
||||
} |
||||
return d, nil |
||||
} |
||||
|
||||
// DurationProto converts a time.Duration to a durationpb.Duration.
|
||||
//
|
||||
// Deprecated: Call the durationpb.New function instead.
|
||||
func DurationProto(d time.Duration) *durationpb.Duration { |
||||
nanos := d.Nanoseconds() |
||||
secs := nanos / 1e9 |
||||
nanos -= secs * 1e9 |
||||
return &durationpb.Duration{ |
||||
Seconds: int64(secs), |
||||
Nanos: int32(nanos), |
||||
} |
||||
} |
||||
|
||||
// validateDuration determines whether the durationpb.Duration is valid
|
||||
// according to the definition in google/protobuf/duration.proto.
|
||||
// A valid durpb.Duration may still be too large to fit into a time.Duration
|
||||
// Note that the range of durationpb.Duration is about 10,000 years,
|
||||
// while the range of time.Duration is about 290 years.
|
||||
func validateDuration(dur *durationpb.Duration) error { |
||||
if dur == nil { |
||||
return errors.New("duration: nil Duration") |
||||
} |
||||
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { |
||||
return fmt.Errorf("duration: %v: seconds out of range", dur) |
||||
} |
||||
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { |
||||
return fmt.Errorf("duration: %v: nanos out of range", dur) |
||||
} |
||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { |
||||
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) |
||||
} |
||||
return nil |
||||
} |
||||
@ -1,112 +0,0 @@ |
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"time" |
||||
|
||||
timestamppb "github.com/golang/protobuf/ptypes/timestamp" |
||||
) |
||||
|
||||
// Range of google.protobuf.Duration as specified in timestamp.proto.
|
||||
const ( |
||||
// Seconds field of the earliest valid Timestamp.
|
||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
minValidSeconds = -62135596800 |
||||
// Seconds field just after the latest valid Timestamp.
|
||||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
maxValidSeconds = 253402300800 |
||||
) |
||||
|
||||
// Timestamp converts a timestamppb.Timestamp to a time.Time.
|
||||
// It returns an error if the argument is invalid.
|
||||
//
|
||||
// Unlike most Go functions, if Timestamp returns an error, the first return
|
||||
// value is not the zero time.Time. Instead, it is the value obtained from the
|
||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
||||
// do map to valid time.Times.
|
||||
//
|
||||
// A nil Timestamp returns an error. The first return value in that case is
|
||||
// undefined.
|
||||
//
|
||||
// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
|
||||
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { |
||||
// Don't return the zero value on error, because corresponds to a valid
|
||||
// timestamp. Instead return whatever time.Unix gives us.
|
||||
var t time.Time |
||||
if ts == nil { |
||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||
} else { |
||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() |
||||
} |
||||
return t, validateTimestamp(ts) |
||||
} |
||||
|
||||
// TimestampNow returns a google.protobuf.Timestamp for the current time.
|
||||
//
|
||||
// Deprecated: Call the timestamppb.Now function instead.
|
||||
func TimestampNow() *timestamppb.Timestamp { |
||||
ts, err := TimestampProto(time.Now()) |
||||
if err != nil { |
||||
panic("ptypes: time.Now() out of Timestamp range") |
||||
} |
||||
return ts |
||||
} |
||||
|
||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||
// It returns an error if the resulting Timestamp is invalid.
|
||||
//
|
||||
// Deprecated: Call the timestamppb.New function instead.
|
||||
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { |
||||
ts := ×tamppb.Timestamp{ |
||||
Seconds: t.Unix(), |
||||
Nanos: int32(t.Nanosecond()), |
||||
} |
||||
if err := validateTimestamp(ts); err != nil { |
||||
return nil, err |
||||
} |
||||
return ts, nil |
||||
} |
||||
|
||||
// TimestampString returns the RFC 3339 string for valid Timestamps.
|
||||
// For invalid Timestamps, it returns an error message in parentheses.
|
||||
//
|
||||
// Deprecated: Call the ts.AsTime method instead,
|
||||
// followed by a call to the Format method on the time.Time value.
|
||||
func TimestampString(ts *timestamppb.Timestamp) string { |
||||
t, err := Timestamp(ts) |
||||
if err != nil { |
||||
return fmt.Sprintf("(%v)", err) |
||||
} |
||||
return t.Format(time.RFC3339Nano) |
||||
} |
||||
|
||||
// validateTimestamp determines whether a Timestamp is valid.
|
||||
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
|
||||
// and has a Nanos field in the range [0, 1e9).
|
||||
//
|
||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||
// Otherwise, it returns an error that describes the problem.
|
||||
//
|
||||
// Every valid Timestamp can be represented by a time.Time,
|
||||
// but the converse is not true.
|
||||
func validateTimestamp(ts *timestamppb.Timestamp) error { |
||||
if ts == nil { |
||||
return errors.New("timestamp: nil Timestamp") |
||||
} |
||||
if ts.Seconds < minValidSeconds { |
||||
return fmt.Errorf("timestamp: %v before 0001-01-01", ts) |
||||
} |
||||
if ts.Seconds >= maxValidSeconds { |
||||
return fmt.Errorf("timestamp: %v after 10000-01-01", ts) |
||||
} |
||||
if ts.Nanos < 0 || ts.Nanos >= 1e9 { |
||||
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) |
||||
} |
||||
return nil |
||||
} |
||||
@ -1,64 +0,0 @@ |
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
||||
|
||||
package timestamp |
||||
|
||||
import ( |
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect" |
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl" |
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb" |
||||
reflect "reflect" |
||||
) |
||||
|
||||
// Symbols defined in public import of google/protobuf/timestamp.proto.
|
||||
|
||||
type Timestamp = timestamppb.Timestamp |
||||
|
||||
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor |
||||
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ |
||||
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, |
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, |
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, |
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, |
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, |
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, |
||||
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, |
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, |
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, |
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, |
||||
0x33, |
||||
} |
||||
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} |
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ |
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
} |
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } |
||||
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { |
||||
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { |
||||
return |
||||
} |
||||
type x struct{} |
||||
out := protoimpl.TypeBuilder{ |
||||
File: protoimpl.DescBuilder{ |
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), |
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, |
||||
NumEnums: 0, |
||||
NumMessages: 0, |
||||
NumExtensions: 0, |
||||
NumServices: 0, |
||||
}, |
||||
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, |
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, |
||||
}.Build() |
||||
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File |
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil |
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil |
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil |
||||
} |
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,182 @@ |
||||
// Copyright 2022 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.35.1
|
||||
// protoc v4.23.4
|
||||
// source: openapiv3/annotations.proto
|
||||
|
||||
package openapi_v3 |
||||
|
||||
import ( |
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect" |
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl" |
||||
descriptorpb "google.golang.org/protobuf/types/descriptorpb" |
||||
reflect "reflect" |
||||
) |
||||
|
||||
const ( |
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) |
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) |
||||
) |
||||
|
||||
var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ |
||||
{ |
||||
ExtendedType: (*descriptorpb.FileOptions)(nil), |
||||
ExtensionType: (*Document)(nil), |
||||
Field: 1143, |
||||
Name: "openapi.v3.document", |
||||
Tag: "bytes,1143,opt,name=document", |
||||
Filename: "openapiv3/annotations.proto", |
||||
}, |
||||
{ |
||||
ExtendedType: (*descriptorpb.MethodOptions)(nil), |
||||
ExtensionType: (*Operation)(nil), |
||||
Field: 1143, |
||||
Name: "openapi.v3.operation", |
||||
Tag: "bytes,1143,opt,name=operation", |
||||
Filename: "openapiv3/annotations.proto", |
||||
}, |
||||
{ |
||||
ExtendedType: (*descriptorpb.MessageOptions)(nil), |
||||
ExtensionType: (*Schema)(nil), |
||||
Field: 1143, |
||||
Name: "openapi.v3.schema", |
||||
Tag: "bytes,1143,opt,name=schema", |
||||
Filename: "openapiv3/annotations.proto", |
||||
}, |
||||
{ |
||||
ExtendedType: (*descriptorpb.FieldOptions)(nil), |
||||
ExtensionType: (*Schema)(nil), |
||||
Field: 1143, |
||||
Name: "openapi.v3.property", |
||||
Tag: "bytes,1143,opt,name=property", |
||||
Filename: "openapiv3/annotations.proto", |
||||
}, |
||||
} |
||||
|
||||
// Extension fields to descriptorpb.FileOptions.
|
||||
var ( |
||||
// optional openapi.v3.Document document = 1143;
|
||||
E_Document = &file_openapiv3_annotations_proto_extTypes[0] |
||||
) |
||||
|
||||
// Extension fields to descriptorpb.MethodOptions.
|
||||
var ( |
||||
// optional openapi.v3.Operation operation = 1143;
|
||||
E_Operation = &file_openapiv3_annotations_proto_extTypes[1] |
||||
) |
||||
|
||||
// Extension fields to descriptorpb.MessageOptions.
|
||||
var ( |
||||
// optional openapi.v3.Schema schema = 1143;
|
||||
E_Schema = &file_openapiv3_annotations_proto_extTypes[2] |
||||
) |
||||
|
||||
// Extension fields to descriptorpb.FieldOptions.
|
||||
var ( |
||||
// optional openapi.v3.Schema property = 1143;
|
||||
E_Property = &file_openapiv3_annotations_proto_extTypes[3] |
||||
) |
||||
|
||||
var File_openapiv3_annotations_proto protoreflect.FileDescriptor |
||||
|
||||
var file_openapiv3_annotations_proto_rawDesc = []byte{ |
||||
0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, |
||||
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, |
||||
0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, |
||||
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, |
||||
0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x6f, 0x70, 0x65, |
||||
0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, |
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, |
||||
0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, |
||||
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, |
||||
0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, |
||||
0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, |
||||
0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, |
||||
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, |
||||
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, |
||||
0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, |
||||
0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, |
||||
0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, |
||||
0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, |
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, |
||||
0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, |
||||
0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, |
||||
0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, |
||||
0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, |
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, |
||||
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, |
||||
0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, |
||||
0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x42, 0x0a, 0x0e, 0x6f, |
||||
0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, |
||||
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, |
||||
0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, |
||||
0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, |
||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, |
||||
} |
||||
|
||||
var file_openapiv3_annotations_proto_goTypes = []any{ |
||||
(*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions
|
||||
(*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions
|
||||
(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
|
||||
(*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions
|
||||
(*Document)(nil), // 4: openapi.v3.Document
|
||||
(*Operation)(nil), // 5: openapi.v3.Operation
|
||||
(*Schema)(nil), // 6: openapi.v3.Schema
|
||||
} |
||||
var file_openapiv3_annotations_proto_depIdxs = []int32{ |
||||
0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions
|
||||
1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions
|
||||
2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions
|
||||
3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions
|
||||
4, // 4: openapi.v3.document:type_name -> openapi.v3.Document
|
||||
5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation
|
||||
6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema
|
||||
6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema
|
||||
8, // [8:8] is the sub-list for method output_type
|
||||
8, // [8:8] is the sub-list for method input_type
|
||||
4, // [4:8] is the sub-list for extension type_name
|
||||
0, // [0:4] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
} |
||||
|
||||
func init() { file_openapiv3_annotations_proto_init() } |
||||
func file_openapiv3_annotations_proto_init() { |
||||
if File_openapiv3_annotations_proto != nil { |
||||
return |
||||
} |
||||
file_openapiv3_OpenAPIv3_proto_init() |
||||
type x struct{} |
||||
out := protoimpl.TypeBuilder{ |
||||
File: protoimpl.DescBuilder{ |
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), |
||||
RawDescriptor: file_openapiv3_annotations_proto_rawDesc, |
||||
NumEnums: 0, |
||||
NumMessages: 0, |
||||
NumExtensions: 4, |
||||
NumServices: 0, |
||||
}, |
||||
GoTypes: file_openapiv3_annotations_proto_goTypes, |
||||
DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, |
||||
ExtensionInfos: file_openapiv3_annotations_proto_extTypes, |
||||
}.Build() |
||||
File_openapiv3_annotations_proto = out.File |
||||
file_openapiv3_annotations_proto_rawDesc = nil |
||||
file_openapiv3_annotations_proto_goTypes = nil |
||||
file_openapiv3_annotations_proto_depIdxs = nil |
||||
} |
||||
@ -0,0 +1,56 @@ |
||||
// Copyright 2022 Google LLC. All Rights Reserved. |
||||
// |
||||
// Licensed under the Apache License, Version 2.0 (the "License"); |
||||
// you may not use this file except in compliance with the License. |
||||
// You may obtain a copy of the License at |
||||
// |
||||
// http://www.apache.org/licenses/LICENSE-2.0 |
||||
// |
||||
// Unless required by applicable law or agreed to in writing, software |
||||
// distributed under the License is distributed on an "AS IS" BASIS, |
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
// See the License for the specific language governing permissions and |
||||
// limitations under the License. |
||||
|
||||
syntax = "proto3"; |
||||
|
||||
package openapi.v3; |
||||
|
||||
import "google/protobuf/descriptor.proto"; |
||||
import "openapiv3/OpenAPIv3.proto"; |
||||
|
||||
// The Go package name. |
||||
option go_package = "./openapiv3;openapi_v3"; |
||||
// This option lets the proto compiler generate Java code inside the package |
||||
// name (see below) instead of inside an outer class. It creates a simpler |
||||
// developer experience by reducing one-level of name nesting and be |
||||
// consistent with most programming languages that don't support outer classes. |
||||
option java_multiple_files = true; |
||||
// The Java outer classname should be the filename in UpperCamelCase. This |
||||
// class is only used to hold proto descriptor, so developers don't need to |
||||
// work with it directly. |
||||
option java_outer_classname = "AnnotationsProto"; |
||||
// The Java package name must be proto package name with proper prefix. |
||||
option java_package = "org.openapi_v3"; |
||||
// A reasonable prefix for the Objective-C symbols generated from the package. |
||||
// It should at a minimum be 3 characters long, all uppercase, and convention |
||||
// is to use an abbreviation of the package name. Something short, but |
||||
// hopefully unique enough to not conflict with things that may come along in |
||||
// the future. 'GPB' is reserved for the protocol buffer implementation itself. |
||||
option objc_class_prefix = "OAS"; |
||||
|
||||
extend google.protobuf.FileOptions { |
||||
Document document = 1143; |
||||
} |
||||
|
||||
extend google.protobuf.MethodOptions { |
||||
Operation operation = 1143; |
||||
} |
||||
|
||||
extend google.protobuf.MessageOptions { |
||||
Schema schema = 1143; |
||||
} |
||||
|
||||
extend google.protobuf.FieldOptions { |
||||
Schema property = 1143; |
||||
} |
||||
@ -1,10 +0,0 @@ |
||||
language: go |
||||
|
||||
go: |
||||
- 1.11.x |
||||
- 1.12.x |
||||
- 1.13.x |
||||
- master |
||||
|
||||
script: |
||||
- go test -cover |
||||
@ -1,67 +0,0 @@ |
||||
# How to contribute # |
||||
|
||||
We'd love to accept your patches and contributions to this project. There are |
||||
just a few small guidelines you need to follow. |
||||
|
||||
|
||||
## Contributor License Agreement ## |
||||
|
||||
Contributions to any Google project must be accompanied by a Contributor |
||||
License Agreement. This is not a copyright **assignment**, it simply gives |
||||
Google permission to use and redistribute your contributions as part of the |
||||
project. |
||||
|
||||
* If you are an individual writing original source code and you're sure you |
||||
own the intellectual property, then you'll need to sign an [individual |
||||
CLA][]. |
||||
|
||||
* If you work for a company that wants to allow you to contribute your work, |
||||
then you'll need to sign a [corporate CLA][]. |
||||
|
||||
You generally only need to submit a CLA once, so if you've already submitted |
||||
one (even if it was for a different project), you probably don't need to do it |
||||
again. |
||||
|
||||
[individual CLA]: https://developers.google.com/open-source/cla/individual |
||||
[corporate CLA]: https://developers.google.com/open-source/cla/corporate |
||||
|
||||
|
||||
## Submitting a patch ## |
||||
|
||||
1. It's generally best to start by opening a new issue describing the bug or |
||||
feature you're intending to fix. Even if you think it's relatively minor, |
||||
it's helpful to know what people are working on. Mention in the initial |
||||
issue that you are planning to work on that bug or feature so that it can |
||||
be assigned to you. |
||||
|
||||
1. Follow the normal process of [forking][] the project, and setup a new |
||||
branch to work in. It's important that each group of changes be done in |
||||
separate branches in order to ensure that a pull request only includes the |
||||
commits related to that bug or feature. |
||||
|
||||
1. Go makes it very simple to ensure properly formatted code, so always run |
||||
`go fmt` on your code before committing it. You should also run |
||||
[golint][] over your code. As noted in the [golint readme][], it's not |
||||
strictly necessary that your code be completely "lint-free", but this will |
||||
help you find common style issues. |
||||
|
||||
1. Any significant changes should almost always be accompanied by tests. The |
||||
project already has good test coverage, so look at some of the existing |
||||
tests if you're unsure how to go about it. [gocov][] and [gocov-html][] |
||||
are invaluable tools for seeing which parts of your code aren't being |
||||
exercised by your tests. |
||||
|
||||
1. Do your best to have [well-formed commit messages][] for each change. |
||||
This provides consistency throughout the project, and ensures that commit |
||||
messages are able to be formatted properly by various git tools. |
||||
|
||||
1. Finally, push the commits to your fork and submit a [pull request][]. |
||||
|
||||
[forking]: https://help.github.com/articles/fork-a-repo |
||||
[golint]: https://github.com/golang/lint |
||||
[golint readme]: https://github.com/golang/lint/blob/master/README |
||||
[gocov]: https://github.com/axw/gocov |
||||
[gocov-html]: https://github.com/matm/gocov-html |
||||
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html |
||||
[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits |
||||
[pull request]: https://help.github.com/articles/creating-a-pull-request |
||||
@ -1,18 +0,0 @@ |
||||
/* |
||||
Copyright 2014 Google Inc. All rights reserved. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
// Package fuzz is a library for populating go objects with random values.
|
||||
package fuzz |
||||
@ -1,605 +0,0 @@ |
||||
/* |
||||
Copyright 2014 Google Inc. All rights reserved. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
package fuzz |
||||
|
||||
import ( |
||||
"fmt" |
||||
"math/rand" |
||||
"reflect" |
||||
"regexp" |
||||
"time" |
||||
|
||||
"github.com/google/gofuzz/bytesource" |
||||
"strings" |
||||
) |
||||
|
||||
// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
|
||||
type fuzzFuncMap map[reflect.Type]reflect.Value |
||||
|
||||
// Fuzzer knows how to fill any object with random fields.
|
||||
type Fuzzer struct { |
||||
fuzzFuncs fuzzFuncMap |
||||
defaultFuzzFuncs fuzzFuncMap |
||||
r *rand.Rand |
||||
nilChance float64 |
||||
minElements int |
||||
maxElements int |
||||
maxDepth int |
||||
skipFieldPatterns []*regexp.Regexp |
||||
} |
||||
|
||||
// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
|
||||
// RandSource, NilChance, or NumElements in any order.
|
||||
func New() *Fuzzer { |
||||
return NewWithSeed(time.Now().UnixNano()) |
||||
} |
||||
|
||||
func NewWithSeed(seed int64) *Fuzzer { |
||||
f := &Fuzzer{ |
||||
defaultFuzzFuncs: fuzzFuncMap{ |
||||
reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), |
||||
}, |
||||
|
||||
fuzzFuncs: fuzzFuncMap{}, |
||||
r: rand.New(rand.NewSource(seed)), |
||||
nilChance: .2, |
||||
minElements: 1, |
||||
maxElements: 10, |
||||
maxDepth: 100, |
||||
} |
||||
return f |
||||
} |
||||
|
||||
// NewFromGoFuzz is a helper function that enables using gofuzz (this
|
||||
// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
|
||||
// fuzzing. Essentially, it enables translating the fuzzing bytes from
|
||||
// go-fuzz to any Go object using this library.
|
||||
//
|
||||
// This implementation promises a constant translation from a given slice of
|
||||
// bytes to the fuzzed objects. This promise will remain over future
|
||||
// versions of Go and of this library.
|
||||
//
|
||||
// Note: the returned Fuzzer should not be shared between multiple goroutines,
|
||||
// as its deterministic output will no longer be available.
|
||||
//
|
||||
// Example: use go-fuzz to test the function `MyFunc(int)` in the package
|
||||
// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content:
|
||||
//
|
||||
// // +build gofuzz
|
||||
// package mypacakge
|
||||
// import fuzz "github.com/google/gofuzz"
|
||||
// func Fuzz(data []byte) int {
|
||||
// var i int
|
||||
// fuzz.NewFromGoFuzz(data).Fuzz(&i)
|
||||
// MyFunc(i)
|
||||
// return 0
|
||||
// }
|
||||
func NewFromGoFuzz(data []byte) *Fuzzer { |
||||
return New().RandSource(bytesource.New(data)) |
||||
} |
||||
|
||||
// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
|
||||
//
|
||||
// Each entry in fuzzFuncs must be a function taking two parameters.
|
||||
// The first parameter must be a pointer or map. It is the variable that
|
||||
// function will fill with random data. The second parameter must be a
|
||||
// fuzz.Continue, which will provide a source of randomness and a way
|
||||
// to automatically continue fuzzing smaller pieces of the first parameter.
|
||||
//
|
||||
// These functions are called sensibly, e.g., if you wanted custom string
|
||||
// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
|
||||
// called and passed the address of strings. Maps and pointers will always
|
||||
// be made/new'd for you, ignoring the NilChange option. For slices, it
|
||||
// doesn't make much sense to pre-create them--Fuzzer doesn't know how
|
||||
// long you want your slice--so take a pointer to a slice, and make it
|
||||
// yourself. (If you don't want your map/pointer type pre-made, take a
|
||||
// pointer to it, and make it yourself.) See the examples for a range of
|
||||
// custom functions.
|
||||
func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { |
||||
for i := range fuzzFuncs { |
||||
v := reflect.ValueOf(fuzzFuncs[i]) |
||||
if v.Kind() != reflect.Func { |
||||
panic("Need only funcs!") |
||||
} |
||||
t := v.Type() |
||||
if t.NumIn() != 2 || t.NumOut() != 0 { |
||||
panic("Need 2 in and 0 out params!") |
||||
} |
||||
argT := t.In(0) |
||||
switch argT.Kind() { |
||||
case reflect.Ptr, reflect.Map: |
||||
default: |
||||
panic("fuzzFunc must take pointer or map type") |
||||
} |
||||
if t.In(1) != reflect.TypeOf(Continue{}) { |
||||
panic("fuzzFunc's second parameter must be type fuzz.Continue") |
||||
} |
||||
f.fuzzFuncs[argT] = v |
||||
} |
||||
return f |
||||
} |
||||
|
||||
// RandSource causes f to get values from the given source of randomness.
|
||||
// Use if you want deterministic fuzzing.
|
||||
func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { |
||||
f.r = rand.New(s) |
||||
return f |
||||
} |
||||
|
||||
// NilChance sets the probability of creating a nil pointer, map, or slice to
|
||||
// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
|
||||
func (f *Fuzzer) NilChance(p float64) *Fuzzer { |
||||
if p < 0 || p > 1 { |
||||
panic("p should be between 0 and 1, inclusive.") |
||||
} |
||||
f.nilChance = p |
||||
return f |
||||
} |
||||
|
||||
// NumElements sets the minimum and maximum number of elements that will be
|
||||
// added to a non-nil map or slice.
|
||||
func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { |
||||
if atLeast > atMost { |
||||
panic("atLeast must be <= atMost") |
||||
} |
||||
if atLeast < 0 { |
||||
panic("atLeast must be >= 0") |
||||
} |
||||
f.minElements = atLeast |
||||
f.maxElements = atMost |
||||
return f |
||||
} |
||||
|
||||
func (f *Fuzzer) genElementCount() int { |
||||
if f.minElements == f.maxElements { |
||||
return f.minElements |
||||
} |
||||
return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) |
||||
} |
||||
|
||||
func (f *Fuzzer) genShouldFill() bool { |
||||
return f.r.Float64() >= f.nilChance |
||||
} |
||||
|
||||
// MaxDepth sets the maximum number of recursive fuzz calls that will be made
|
||||
// before stopping. This includes struct members, pointers, and map and slice
|
||||
// elements.
|
||||
func (f *Fuzzer) MaxDepth(d int) *Fuzzer { |
||||
f.maxDepth = d |
||||
return f |
||||
} |
||||
|
||||
// Skip fields which match the supplied pattern. Call this multiple times if needed
|
||||
// This is useful to skip XXX_ fields generated by protobuf
|
||||
func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { |
||||
f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) |
||||
return f |
||||
} |
||||
|
||||
// Fuzz recursively fills all of obj's fields with something random. First
|
||||
// this tries to find a custom fuzz function (see Funcs). If there is no
|
||||
// custom function this tests whether the object implements fuzz.Interface and,
|
||||
// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if
|
||||
// there is a default fuzz function provided by this package. If all of that
|
||||
// fails, this will generate random values for all primitive fields and then
|
||||
// recurse for all non-primitives.
|
||||
//
|
||||
// This is safe for cyclic or tree-like structs, up to a limit. Use the
|
||||
// MaxDepth method to adjust how deep you need it to recurse.
|
||||
//
|
||||
// obj must be a pointer. Only exported (public) fields can be set (thanks,
|
||||
// golang :/ ) Intended for tests, so will panic on bad input or unimplemented
|
||||
// fields.
|
||||
func (f *Fuzzer) Fuzz(obj interface{}) { |
||||
v := reflect.ValueOf(obj) |
||||
if v.Kind() != reflect.Ptr { |
||||
panic("needed ptr!") |
||||
} |
||||
v = v.Elem() |
||||
f.fuzzWithContext(v, 0) |
||||
} |
||||
|
||||
// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
|
||||
// obj's type will not be called and obj will not be tested for fuzz.Interface
|
||||
// conformance. This applies only to obj and not other instances of obj's
|
||||
// type.
|
||||
// Not safe for cyclic or tree-like structs!
|
||||
// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
|
||||
// Intended for tests, so will panic on bad input or unimplemented fields.
|
||||
func (f *Fuzzer) FuzzNoCustom(obj interface{}) { |
||||
v := reflect.ValueOf(obj) |
||||
if v.Kind() != reflect.Ptr { |
||||
panic("needed ptr!") |
||||
} |
||||
v = v.Elem() |
||||
f.fuzzWithContext(v, flagNoCustomFuzz) |
||||
} |
||||
|
||||
const ( |
||||
// Do not try to find a custom fuzz function. Does not apply recursively.
|
||||
flagNoCustomFuzz uint64 = 1 << iota |
||||
) |
||||
|
||||
func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { |
||||
fc := &fuzzerContext{fuzzer: f} |
||||
fc.doFuzz(v, flags) |
||||
} |
||||
|
||||
// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
|
||||
// be thread-safe.
|
||||
type fuzzerContext struct { |
||||
fuzzer *Fuzzer |
||||
curDepth int |
||||
} |
||||
|
||||
func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { |
||||
if fc.curDepth >= fc.fuzzer.maxDepth { |
||||
return |
||||
} |
||||
fc.curDepth++ |
||||
defer func() { fc.curDepth-- }() |
||||
|
||||
if !v.CanSet() { |
||||
return |
||||
} |
||||
|
||||
if flags&flagNoCustomFuzz == 0 { |
||||
// Check for both pointer and non-pointer custom functions.
|
||||
if v.CanAddr() && fc.tryCustom(v.Addr()) { |
||||
return |
||||
} |
||||
if fc.tryCustom(v) { |
||||
return |
||||
} |
||||
} |
||||
|
||||
if fn, ok := fillFuncMap[v.Kind()]; ok { |
||||
fn(v, fc.fuzzer.r) |
||||
return |
||||
} |
||||
|
||||
switch v.Kind() { |
||||
case reflect.Map: |
||||
if fc.fuzzer.genShouldFill() { |
||||
v.Set(reflect.MakeMap(v.Type())) |
||||
n := fc.fuzzer.genElementCount() |
||||
for i := 0; i < n; i++ { |
||||
key := reflect.New(v.Type().Key()).Elem() |
||||
fc.doFuzz(key, 0) |
||||
val := reflect.New(v.Type().Elem()).Elem() |
||||
fc.doFuzz(val, 0) |
||||
v.SetMapIndex(key, val) |
||||
} |
||||
return |
||||
} |
||||
v.Set(reflect.Zero(v.Type())) |
||||
case reflect.Ptr: |
||||
if fc.fuzzer.genShouldFill() { |
||||
v.Set(reflect.New(v.Type().Elem())) |
||||
fc.doFuzz(v.Elem(), 0) |
||||
return |
||||
} |
||||
v.Set(reflect.Zero(v.Type())) |
||||
case reflect.Slice: |
||||
if fc.fuzzer.genShouldFill() { |
||||
n := fc.fuzzer.genElementCount() |
||||
v.Set(reflect.MakeSlice(v.Type(), n, n)) |
||||
for i := 0; i < n; i++ { |
||||
fc.doFuzz(v.Index(i), 0) |
||||
} |
||||
return |
||||
} |
||||
v.Set(reflect.Zero(v.Type())) |
||||
case reflect.Array: |
||||
if fc.fuzzer.genShouldFill() { |
||||
n := v.Len() |
||||
for i := 0; i < n; i++ { |
||||
fc.doFuzz(v.Index(i), 0) |
||||
} |
||||
return |
||||
} |
||||
v.Set(reflect.Zero(v.Type())) |
||||
case reflect.Struct: |
||||
for i := 0; i < v.NumField(); i++ { |
||||
skipField := false |
||||
fieldName := v.Type().Field(i).Name |
||||
for _, pattern := range fc.fuzzer.skipFieldPatterns { |
||||
if pattern.MatchString(fieldName) { |
||||
skipField = true |
||||
break |
||||
} |
||||
} |
||||
if !skipField { |
||||
fc.doFuzz(v.Field(i), 0) |
||||
} |
||||
} |
||||
case reflect.Chan: |
||||
fallthrough |
||||
case reflect.Func: |
||||
fallthrough |
||||
case reflect.Interface: |
||||
fallthrough |
||||
default: |
||||
panic(fmt.Sprintf("Can't handle %#v", v.Interface())) |
||||
} |
||||
} |
||||
|
||||
// tryCustom searches for custom handlers, and returns true iff it finds a match
|
||||
// and successfully randomizes v.
|
||||
func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { |
||||
// First: see if we have a fuzz function for it.
|
||||
doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] |
||||
if !ok { |
||||
// Second: see if it can fuzz itself.
|
||||
if v.CanInterface() { |
||||
intf := v.Interface() |
||||
if fuzzable, ok := intf.(Interface); ok { |
||||
fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) |
||||
return true |
||||
} |
||||
} |
||||
// Finally: see if there is a default fuzz function.
|
||||
doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] |
||||
if !ok { |
||||
return false |
||||
} |
||||
} |
||||
|
||||
switch v.Kind() { |
||||
case reflect.Ptr: |
||||
if v.IsNil() { |
||||
if !v.CanSet() { |
||||
return false |
||||
} |
||||
v.Set(reflect.New(v.Type().Elem())) |
||||
} |
||||
case reflect.Map: |
||||
if v.IsNil() { |
||||
if !v.CanSet() { |
||||
return false |
||||
} |
||||
v.Set(reflect.MakeMap(v.Type())) |
||||
} |
||||
default: |
||||
return false |
||||
} |
||||
|
||||
doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ |
||||
fc: fc, |
||||
Rand: fc.fuzzer.r, |
||||
})}) |
||||
return true |
||||
} |
||||
|
||||
// Interface represents an object that knows how to fuzz itself. Any time we
|
||||
// find a type that implements this interface we will delegate the act of
|
||||
// fuzzing itself.
|
||||
type Interface interface { |
||||
Fuzz(c Continue) |
||||
} |
||||
|
||||
// Continue can be passed to custom fuzzing functions to allow them to use
|
||||
// the correct source of randomness and to continue fuzzing their members.
|
||||
type Continue struct { |
||||
fc *fuzzerContext |
||||
|
||||
// For convenience, Continue implements rand.Rand via embedding.
|
||||
// Use this for generating any randomness if you want your fuzzing
|
||||
// to be repeatable for a given seed.
|
||||
*rand.Rand |
||||
} |
||||
|
||||
// Fuzz continues fuzzing obj. obj must be a pointer.
|
||||
func (c Continue) Fuzz(obj interface{}) { |
||||
v := reflect.ValueOf(obj) |
||||
if v.Kind() != reflect.Ptr { |
||||
panic("needed ptr!") |
||||
} |
||||
v = v.Elem() |
||||
c.fc.doFuzz(v, 0) |
||||
} |
||||
|
||||
// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
|
||||
// obj's type will not be called and obj will not be tested for fuzz.Interface
|
||||
// conformance. This applies only to obj and not other instances of obj's
|
||||
// type.
|
||||
func (c Continue) FuzzNoCustom(obj interface{}) { |
||||
v := reflect.ValueOf(obj) |
||||
if v.Kind() != reflect.Ptr { |
||||
panic("needed ptr!") |
||||
} |
||||
v = v.Elem() |
||||
c.fc.doFuzz(v, flagNoCustomFuzz) |
||||
} |
||||
|
||||
// RandString makes a random string up to 20 characters long. The returned string
|
||||
// may include a variety of (valid) UTF-8 encodings.
|
||||
func (c Continue) RandString() string { |
||||
return randString(c.Rand) |
||||
} |
||||
|
||||
// RandUint64 makes random 64 bit numbers.
|
||||
// Weirdly, rand doesn't have a function that gives you 64 random bits.
|
||||
func (c Continue) RandUint64() uint64 { |
||||
return randUint64(c.Rand) |
||||
} |
||||
|
||||
// RandBool returns true or false randomly.
|
||||
func (c Continue) RandBool() bool { |
||||
return randBool(c.Rand) |
||||
} |
||||
|
||||
func fuzzInt(v reflect.Value, r *rand.Rand) { |
||||
v.SetInt(int64(randUint64(r))) |
||||
} |
||||
|
||||
func fuzzUint(v reflect.Value, r *rand.Rand) { |
||||
v.SetUint(randUint64(r)) |
||||
} |
||||
|
||||
func fuzzTime(t *time.Time, c Continue) { |
||||
var sec, nsec int64 |
||||
// Allow for about 1000 years of random time values, which keeps things
|
||||
// like JSON parsing reasonably happy.
|
||||
sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) |
||||
c.Fuzz(&nsec) |
||||
*t = time.Unix(sec, nsec) |
||||
} |
||||
|
||||
var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ |
||||
reflect.Bool: func(v reflect.Value, r *rand.Rand) { |
||||
v.SetBool(randBool(r)) |
||||
}, |
||||
reflect.Int: fuzzInt, |
||||
reflect.Int8: fuzzInt, |
||||
reflect.Int16: fuzzInt, |
||||
reflect.Int32: fuzzInt, |
||||
reflect.Int64: fuzzInt, |
||||
reflect.Uint: fuzzUint, |
||||
reflect.Uint8: fuzzUint, |
||||
reflect.Uint16: fuzzUint, |
||||
reflect.Uint32: fuzzUint, |
||||
reflect.Uint64: fuzzUint, |
||||
reflect.Uintptr: fuzzUint, |
||||
reflect.Float32: func(v reflect.Value, r *rand.Rand) { |
||||
v.SetFloat(float64(r.Float32())) |
||||
}, |
||||
reflect.Float64: func(v reflect.Value, r *rand.Rand) { |
||||
v.SetFloat(r.Float64()) |
||||
}, |
||||
reflect.Complex64: func(v reflect.Value, r *rand.Rand) { |
||||
v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) |
||||
}, |
||||
reflect.Complex128: func(v reflect.Value, r *rand.Rand) { |
||||
v.SetComplex(complex(r.Float64(), r.Float64())) |
||||
}, |
||||
reflect.String: func(v reflect.Value, r *rand.Rand) { |
||||
v.SetString(randString(r)) |
||||
}, |
||||
reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { |
||||
panic("unimplemented") |
||||
}, |
||||
} |
||||
|
||||
// randBool returns true or false randomly.
|
||||
func randBool(r *rand.Rand) bool { |
||||
return r.Int31()&(1<<30) == 0 |
||||
} |
||||
|
||||
type int63nPicker interface { |
||||
Int63n(int64) int64 |
||||
} |
||||
|
||||
// UnicodeRange describes a sequential range of unicode characters.
|
||||
// Last must be numerically greater than First.
|
||||
type UnicodeRange struct { |
||||
First, Last rune |
||||
} |
||||
|
||||
// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
|
||||
// To be useful, each range must have at least one character (First <= Last) and
|
||||
// there must be at least one range.
|
||||
type UnicodeRanges []UnicodeRange |
||||
|
||||
// choose returns a random unicode character from the given range, using the
|
||||
// given randomness source.
|
||||
func (ur UnicodeRange) choose(r int63nPicker) rune { |
||||
count := int64(ur.Last - ur.First + 1) |
||||
return ur.First + rune(r.Int63n(count)) |
||||
} |
||||
|
||||
// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
|
||||
// Each character is selected from the range ur. If there are no characters
|
||||
// in the range (cr.Last < cr.First), this will panic.
|
||||
func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { |
||||
ur.check() |
||||
return func(s *string, c Continue) { |
||||
*s = ur.randString(c.Rand) |
||||
} |
||||
} |
||||
|
||||
// check is a function that used to check whether the first of ur(UnicodeRange)
|
||||
// is greater than the last one.
|
||||
func (ur UnicodeRange) check() { |
||||
if ur.Last < ur.First { |
||||
panic("The last encoding must be greater than the first one.") |
||||
} |
||||
} |
||||
|
||||
// randString of UnicodeRange makes a random string up to 20 characters long.
|
||||
// Each character is selected form ur(UnicodeRange).
|
||||
func (ur UnicodeRange) randString(r *rand.Rand) string { |
||||
n := r.Intn(20) |
||||
sb := strings.Builder{} |
||||
sb.Grow(n) |
||||
for i := 0; i < n; i++ { |
||||
sb.WriteRune(ur.choose(r)) |
||||
} |
||||
return sb.String() |
||||
} |
||||
|
||||
// defaultUnicodeRanges sets a default unicode range when user do not set
|
||||
// CustomStringFuzzFunc() but wants fuzz string.
|
||||
var defaultUnicodeRanges = UnicodeRanges{ |
||||
{' ', '~'}, // ASCII characters
|
||||
{'\u00a0', '\u02af'}, // Multi-byte encoded characters
|
||||
{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
|
||||
} |
||||
|
||||
// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
|
||||
// Each character is selected from one of the ranges of ur(UnicodeRanges).
|
||||
// Each range has an equal probability of being chosen. If there are no ranges,
|
||||
// or a selected range has no characters (.Last < .First), this will panic.
|
||||
// Do not modify any of the ranges in ur after calling this function.
|
||||
func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { |
||||
// Check unicode ranges slice is empty.
|
||||
if len(ur) == 0 { |
||||
panic("UnicodeRanges is empty.") |
||||
} |
||||
// if not empty, each range should be checked.
|
||||
for i := range ur { |
||||
ur[i].check() |
||||
} |
||||
return func(s *string, c Continue) { |
||||
*s = ur.randString(c.Rand) |
||||
} |
||||
} |
||||
|
||||
// randString of UnicodeRanges makes a random string up to 20 characters long.
|
||||
// Each character is selected form one of the ranges of ur(UnicodeRanges),
|
||||
// and each range has an equal probability of being chosen.
|
||||
func (ur UnicodeRanges) randString(r *rand.Rand) string { |
||||
n := r.Intn(20) |
||||
sb := strings.Builder{} |
||||
sb.Grow(n) |
||||
for i := 0; i < n; i++ { |
||||
sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) |
||||
} |
||||
return sb.String() |
||||
} |
||||
|
||||
// randString makes a random string up to 20 characters long. The returned string
|
||||
// may include a variety of (valid) UTF-8 encodings.
|
||||
func randString(r *rand.Rand) string { |
||||
return defaultUnicodeRanges.randString(r) |
||||
} |
||||
|
||||
// randUint64 makes random 64 bit numbers.
|
||||
// Weirdly, rand doesn't have a function that gives you 64 random bits.
|
||||
func randUint64(r *rand.Rand) uint64 { |
||||
return uint64(r.Uint32())<<32 | uint64(r.Uint32()) |
||||
} |
||||
@ -0,0 +1,56 @@ |
||||
/* |
||||
Copyright 2024 The Kubernetes Authors. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
package operation |
||||
|
||||
import "k8s.io/apimachinery/pkg/util/sets" |
||||
|
||||
// Operation provides contextual information about a validation request and the API
|
||||
// operation being validated.
|
||||
// This type is intended for use with generate validation code and may be enhanced
|
||||
// in the future to include other information needed to validate requests.
|
||||
type Operation struct { |
||||
// Type is the category of operation being validated. This does not
|
||||
// differentiate between HTTP verbs like PUT and PATCH, but rather merges
|
||||
// those into a single "Update" category.
|
||||
Type Type |
||||
|
||||
// Options declare the options enabled for validation.
|
||||
//
|
||||
// Options should be set according to a resource validation strategy before validation
|
||||
// is performed, and must be treated as read-only during validation.
|
||||
//
|
||||
// Options are identified by string names. Option string names may match the name of a feature
|
||||
// gate, in which case the presence of the name in the set indicates that the feature is
|
||||
// considered enabled for the resource being validated. Note that a resource may have a
|
||||
// feature enabled even when the feature gate is disabled. This can happen when feature is
|
||||
// already in-use by a resource, often because the feature gate was enabled when the
|
||||
// resource first began using the feature.
|
||||
//
|
||||
// Unset options are disabled/false.
|
||||
Options sets.Set[string] |
||||
} |
||||
|
||||
// Code is the request operation to be validated.
|
||||
type Type uint32 |
||||
|
||||
const ( |
||||
// Create indicates the request being validated is for a resource create operation.
|
||||
Create Type = iota |
||||
|
||||
// Update indicates the request being validated is for a resource update operation.
|
||||
Update |
||||
) |
||||
4
vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go
generated
vendored
4
vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go
generated
vendored
@ -0,0 +1,230 @@ |
||||
/* |
||||
Copyright 2025 The Kubernetes Authors. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
package json |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"io" |
||||
"maps" |
||||
"slices" |
||||
"sort" |
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta" |
||||
"k8s.io/apimachinery/pkg/conversion" |
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" |
||||
"k8s.io/apimachinery/pkg/runtime" |
||||
) |
||||
|
||||
func streamEncodeCollections(obj runtime.Object, w io.Writer) (bool, error) { |
||||
list, ok := obj.(*unstructured.UnstructuredList) |
||||
if ok { |
||||
return true, streamingEncodeUnstructuredList(w, list) |
||||
} |
||||
if _, ok := obj.(json.Marshaler); ok { |
||||
return false, nil |
||||
} |
||||
typeMeta, listMeta, items, err := getListMeta(obj) |
||||
if err == nil { |
||||
return true, streamingEncodeList(w, typeMeta, listMeta, items) |
||||
} |
||||
return false, nil |
||||
} |
||||
|
||||
// getListMeta implements list extraction logic for json stream serialization.
|
||||
//
|
||||
// Reason for a custom logic instead of reusing accessors from meta package:
|
||||
// * Validate json tags to prevent incompatibility with json standard package.
|
||||
// * ListMetaAccessor doesn't distinguish empty from nil value.
|
||||
// * TypeAccessort reparsing "apiVersion" and serializing it with "{group}/{version}"
|
||||
func getListMeta(list runtime.Object) (metav1.TypeMeta, metav1.ListMeta, []runtime.Object, error) { |
||||
listValue, err := conversion.EnforcePtr(list) |
||||
if err != nil { |
||||
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err |
||||
} |
||||
listType := listValue.Type() |
||||
if listType.NumField() != 3 { |
||||
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListType to have 3 fields") |
||||
} |
||||
// TypeMeta
|
||||
typeMeta, ok := listValue.Field(0).Interface().(metav1.TypeMeta) |
||||
if !ok { |
||||
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected TypeMeta field to have TypeMeta type") |
||||
} |
||||
if listType.Field(0).Tag.Get("json") != ",inline" { |
||||
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected TypeMeta json field tag to be ",inline"`) |
||||
} |
||||
// ListMeta
|
||||
listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta) |
||||
if !ok { |
||||
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListMeta field to have ListMeta type") |
||||
} |
||||
if listType.Field(1).Tag.Get("json") != "metadata,omitempty" { |
||||
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected ListMeta json field tag to be "metadata,omitempty"`) |
||||
} |
||||
// Items
|
||||
items, err := meta.ExtractList(list) |
||||
if err != nil { |
||||
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err |
||||
} |
||||
if listType.Field(2).Tag.Get("json") != "items" { |
||||
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected Items json field tag to be "items"`) |
||||
} |
||||
return typeMeta, listMeta, items, nil |
||||
} |
||||
|
||||
func streamingEncodeList(w io.Writer, typeMeta metav1.TypeMeta, listMeta metav1.ListMeta, items []runtime.Object) error { |
||||
// Start
|
||||
if _, err := w.Write([]byte(`{`)); err != nil { |
||||
return err |
||||
} |
||||
|
||||
// TypeMeta
|
||||
if typeMeta.Kind != "" { |
||||
if err := encodeKeyValuePair(w, "kind", typeMeta.Kind, []byte(",")); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
if typeMeta.APIVersion != "" { |
||||
if err := encodeKeyValuePair(w, "apiVersion", typeMeta.APIVersion, []byte(",")); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
|
||||
// ListMeta
|
||||
if err := encodeKeyValuePair(w, "metadata", listMeta, []byte(",")); err != nil { |
||||
return err |
||||
} |
||||
|
||||
// Items
|
||||
if err := encodeItemsObjectSlice(w, items); err != nil { |
||||
return err |
||||
} |
||||
|
||||
// End
|
||||
_, err := w.Write([]byte("}\n")) |
||||
return err |
||||
} |
||||
|
||||
func encodeItemsObjectSlice(w io.Writer, items []runtime.Object) (err error) { |
||||
if items == nil { |
||||
err := encodeKeyValuePair(w, "items", nil, nil) |
||||
return err |
||||
} |
||||
_, err = w.Write([]byte(`"items":[`)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
suffix := []byte(",") |
||||
for i, item := range items { |
||||
if i == len(items)-1 { |
||||
suffix = nil |
||||
} |
||||
err := encodeValue(w, item, suffix) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
_, err = w.Write([]byte("]")) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
return err |
||||
} |
||||
|
||||
func streamingEncodeUnstructuredList(w io.Writer, list *unstructured.UnstructuredList) error { |
||||
_, err := w.Write([]byte(`{`)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
keys := slices.Collect(maps.Keys(list.Object)) |
||||
if _, exists := list.Object["items"]; !exists { |
||||
keys = append(keys, "items") |
||||
} |
||||
sort.Strings(keys) |
||||
|
||||
suffix := []byte(",") |
||||
for i, key := range keys { |
||||
if i == len(keys)-1 { |
||||
suffix = nil |
||||
} |
||||
if key == "items" { |
||||
err = encodeItemsUnstructuredSlice(w, list.Items, suffix) |
||||
} else { |
||||
err = encodeKeyValuePair(w, key, list.Object[key], suffix) |
||||
} |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
_, err = w.Write([]byte("}\n")) |
||||
return err |
||||
} |
||||
|
||||
func encodeItemsUnstructuredSlice(w io.Writer, items []unstructured.Unstructured, suffix []byte) (err error) { |
||||
_, err = w.Write([]byte(`"items":[`)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
comma := []byte(",") |
||||
for i, item := range items { |
||||
if i == len(items)-1 { |
||||
comma = nil |
||||
} |
||||
err := encodeValue(w, item.Object, comma) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
_, err = w.Write([]byte("]")) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if len(suffix) > 0 { |
||||
_, err = w.Write(suffix) |
||||
} |
||||
return err |
||||
} |
||||
|
||||
func encodeKeyValuePair(w io.Writer, key string, value any, suffix []byte) (err error) { |
||||
err = encodeValue(w, key, []byte(":")) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
err = encodeValue(w, value, suffix) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
return err |
||||
} |
||||
|
||||
func encodeValue(w io.Writer, value any, suffix []byte) error { |
||||
data, err := json.Marshal(value) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
_, err = w.Write(data) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if len(suffix) > 0 { |
||||
_, err = w.Write(suffix) |
||||
} |
||||
return err |
||||
} |
||||
@ -0,0 +1,174 @@ |
||||
/* |
||||
Copyright 2025 The Kubernetes Authors. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
package protobuf |
||||
|
||||
import ( |
||||
"errors" |
||||
"io" |
||||
"math/bits" |
||||
|
||||
"github.com/gogo/protobuf/proto" |
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta" |
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
||||
"k8s.io/apimachinery/pkg/conversion" |
||||
"k8s.io/apimachinery/pkg/runtime" |
||||
) |
||||
|
||||
// Sentinel errors returned by getStreamingListData when a list type does not
// have the exact shape (field count, field types, and protobuf tags) required
// for streaming serialization.
var (
	errFieldCount          = errors.New("expected ListType to have 3 fields")
	errTypeMetaField       = errors.New("expected TypeMeta field to have TypeMeta type")
	errTypeMetaProtobufTag = errors.New(`expected TypeMeta protobuf field tag to be ""`)
	errListMetaField       = errors.New("expected ListMeta field to have ListMeta type")
	errListMetaProtobufTag = errors.New(`expected ListMeta protobuf field tag to be "bytes,1,opt,name=metadata"`)
	errItemsProtobufTag    = errors.New(`expected Items protobuf field tag to be "bytes,2,rep,name=items"`)
	errItemsSizer          = errors.New(`expected Items elements to implement proto.Sizer`)
)
||||
|
||||
// getStreamingListData implements list extraction logic for protobuf stream serialization.
//
// Reason for a custom logic instead of reusing accessors from meta package:
// * Validate proto tags to prevent incompatibility with proto standard package.
// * ListMetaAccessor doesn't distinguish empty from nil value.
// * TypeAccessor reparsing "apiVersion" and serializing it with "{group}/{version}"
func getStreamingListData(list runtime.Object) (data streamingListData, err error) {
	listValue, err := conversion.EnforcePtr(list)
	if err != nil {
		return data, err
	}
	listType := listValue.Type()
	// Streaming encoding only supports the canonical List shape:
	// TypeMeta, ListMeta, Items — in that exact order.
	if listType.NumField() != 3 {
		return data, errFieldCount
	}
	// TypeMeta: validated, but not returned as is not serialized.
	_, ok := listValue.Field(0).Interface().(metav1.TypeMeta)
	if !ok {
		return data, errTypeMetaField
	}
	if listType.Field(0).Tag.Get("protobuf") != "" {
		return data, errTypeMetaProtobufTag
	}
	// ListMeta
	listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta)
	if !ok {
		return data, errListMetaField
	}
	// if we were ever to relax the protobuf tag check we should update the hardcoded `0xa` below when writing ListMeta.
	if listType.Field(1).Tag.Get("protobuf") != "bytes,1,opt,name=metadata" {
		return data, errListMetaProtobufTag
	}
	data.listMeta = listMeta
	// Items; if we were ever to relax the protobuf tag check we should update the hardcoded `0x12` below when writing Items.
	if listType.Field(2).Tag.Get("protobuf") != "bytes,2,rep,name=items" {
		return data, errItemsProtobufTag
	}
	items, err := meta.ExtractList(list)
	if err != nil {
		return data, err
	}
	data.items = items
	// Pre-compute all sizes once so the encoder can emit correct length
	// prefixes without a second serialization pass.
	data.totalSize, data.listMetaSize, data.itemsSizes, err = listSize(listMeta, items)
	return data, err
}
||||
|
||||
// streamingListData holds everything needed to serialize a List object field
// by field: the extracted ListMeta and Items plus their pre-computed sizes.
type streamingListData struct {
	// totalSize is the total size of the serialized List object, including their proto headers/size bytes
	totalSize int

	// listMetaSize caches results from .Size() call to listMeta, doesn't include header bytes (field identifier, size)
	listMetaSize int
	listMeta     metav1.ListMeta

	// itemsSizes caches results from .Size() call to items, doesn't include header bytes (field identifier, size)
	itemsSizes []int
	items      []runtime.Object
}
||||
|
||||
// listSize return size of ListMeta and items to be later used for preallocations.
|
||||
// listMetaSize and itemSizes do not include header bytes (field identifier, size).
|
||||
func listSize(listMeta metav1.ListMeta, items []runtime.Object) (totalSize, listMetaSize int, itemSizes []int, err error) { |
||||
// ListMeta
|
||||
listMetaSize = listMeta.Size() |
||||
totalSize += 1 + sovGenerated(uint64(listMetaSize)) + listMetaSize |
||||
// Items
|
||||
itemSizes = make([]int, len(items)) |
||||
for i, item := range items { |
||||
sizer, ok := item.(proto.Sizer) |
||||
if !ok { |
||||
return totalSize, listMetaSize, nil, errItemsSizer |
||||
} |
||||
n := sizer.Size() |
||||
itemSizes[i] = n |
||||
totalSize += 1 + sovGenerated(uint64(n)) + n |
||||
} |
||||
return totalSize, listMetaSize, itemSizes, nil |
||||
} |
||||
|
||||
func streamingEncodeUnknownList(w io.Writer, unk runtime.Unknown, listData streamingListData, memAlloc runtime.MemoryAllocator) error { |
||||
_, err := w.Write(protoEncodingPrefix) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
// encodeList is responsible for encoding the List into the unknown Raw.
|
||||
encodeList := func(writer io.Writer) (int, error) { |
||||
return streamingEncodeList(writer, listData, memAlloc) |
||||
} |
||||
_, err = unk.MarshalToWriter(w, listData.totalSize, encodeList) |
||||
return err |
||||
} |
||||
|
||||
// streamingEncodeList writes the ListMeta and Items fields of a List message
// to w and returns the total number of bytes written. The sizes cached in
// listData must match what the fields serialize to, since they are written as
// the length prefixes.
func streamingEncodeList(w io.Writer, listData streamingListData, memAlloc runtime.MemoryAllocator) (size int, err error) {
	// ListMeta; 0xa = (1 << 3) | 2; field number: 1, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure
	n, err := doEncodeWithHeader(&listData.listMeta, w, 0xa, listData.listMetaSize, memAlloc)
	size += n
	if err != nil {
		return size, err
	}
	// Items; 0x12 = (2 << 3) | 2; field number: 2, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure
	for i, item := range listData.items {
		n, err := doEncodeWithHeader(item, w, 0x12, listData.itemsSizes[i], memAlloc)
		size += n
		if err != nil {
			return size, err
		}
	}
	return size, nil
}
||||
|
||||
func writeVarintGenerated(w io.Writer, v int) (int, error) { |
||||
buf := make([]byte, sovGenerated(uint64(v))) |
||||
encodeVarintGenerated(buf, len(buf), uint64(v)) |
||||
return w.Write(buf) |
||||
} |
||||
|
||||
// sovGenerated returns the number of bytes needed to encode v as a protobuf
// varint (7 payload bits per byte, minimum one byte). Behaviorally identical
// to the helper emitted in `generated.pb.go`.
func sovGenerated(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}
||||
|
||||
// encodeVarintGenerated is copied from `generated.pb.go` encodes varint.
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { |
||||
offset -= sovGenerated(v) |
||||
base := offset |
||||
for v >= 1<<7 { |
||||
dAtA[offset] = uint8(v&0x7f | 0x80) |
||||
v >>= 7 |
||||
offset++ |
||||
} |
||||
dAtA[offset] = uint8(v) |
||||
return base |
||||
} |
||||
@ -0,0 +1,212 @@ |
||||
/* |
||||
Copyright 2025 The Kubernetes Authors. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
package field |
||||
|
||||
import ( |
||||
"fmt" |
||||
"reflect" |
||||
"regexp" |
||||
"strings" |
||||
) |
||||
|
||||
// ErrorMatcher is a helper for comparing Error objects. The zero value matches
// any error (no criteria enabled); use the By* builder methods to enable
// individual criteria.
type ErrorMatcher struct {
	// TODO(thockin): consider whether type is ever NOT required, maybe just
	// assume it.
	matchType bool
	// TODO(thockin): consider whether field could be assumed - if the
	// "want" error has a nil field, don't match on field.
	matchField bool
	// TODO(thockin): consider whether value could be assumed - if the
	// "want" error has a nil value, don't match on field.
	matchValue bool
	// matchOrigin enables comparison of the Origin field.
	matchOrigin bool
	// matchDetail, when non-nil, compares the want and got Detail strings.
	matchDetail func(want, got string) bool
	// requireOriginWhenInvalid additionally requires Origin to be set on both
	// errors of type Invalid when matching by Origin.
	requireOriginWhenInvalid bool
}
||||
|
||||
// Matches returns true if the two Error objects match according to the
|
||||
// configured criteria.
|
||||
func (m ErrorMatcher) Matches(want, got *Error) bool { |
||||
if m.matchType && want.Type != got.Type { |
||||
return false |
||||
} |
||||
if m.matchField && want.Field != got.Field { |
||||
return false |
||||
} |
||||
if m.matchValue && !reflect.DeepEqual(want.BadValue, got.BadValue) { |
||||
return false |
||||
} |
||||
if m.matchOrigin { |
||||
if want.Origin != got.Origin { |
||||
return false |
||||
} |
||||
if m.requireOriginWhenInvalid && want.Type == ErrorTypeInvalid { |
||||
if want.Origin == "" || got.Origin == "" { |
||||
return false |
||||
} |
||||
} |
||||
} |
||||
if m.matchDetail != nil && !m.matchDetail(want.Detail, got.Detail) { |
||||
return false |
||||
} |
||||
return true |
||||
} |
||||
|
||||
// Render returns a string representation of the specified Error object,
|
||||
// according to the criteria configured in the ErrorMatcher.
|
||||
func (m ErrorMatcher) Render(e *Error) string { |
||||
buf := strings.Builder{} |
||||
|
||||
comma := func() { |
||||
if buf.Len() > 0 { |
||||
buf.WriteString(", ") |
||||
} |
||||
} |
||||
|
||||
if m.matchType { |
||||
comma() |
||||
buf.WriteString(fmt.Sprintf("Type=%q", e.Type)) |
||||
} |
||||
if m.matchField { |
||||
comma() |
||||
buf.WriteString(fmt.Sprintf("Field=%q", e.Field)) |
||||
} |
||||
if m.matchValue { |
||||
comma() |
||||
buf.WriteString(fmt.Sprintf("Value=%v", e.BadValue)) |
||||
} |
||||
if m.matchOrigin || m.requireOriginWhenInvalid && e.Type == ErrorTypeInvalid { |
||||
comma() |
||||
buf.WriteString(fmt.Sprintf("Origin=%q", e.Origin)) |
||||
} |
||||
if m.matchDetail != nil { |
||||
comma() |
||||
buf.WriteString(fmt.Sprintf("Detail=%q", e.Detail)) |
||||
} |
||||
return "{" + buf.String() + "}" |
||||
} |
||||
|
||||
// Exactly returns a derived ErrorMatcher which matches all fields exactly.
// The receiver is passed by value, so the original matcher is unmodified.
func (m ErrorMatcher) Exactly() ErrorMatcher {
	return m.ByType().ByField().ByValue().ByOrigin().ByDetailExact()
}
||||
|
||||
// ByType returns a derived ErrorMatcher which also matches by type.
// The receiver is passed by value, so the original matcher is unmodified.
func (m ErrorMatcher) ByType() ErrorMatcher {
	m.matchType = true
	return m
}
||||
|
||||
// ByField returns a derived ErrorMatcher which also matches by field path.
// The receiver is passed by value, so the original matcher is unmodified.
func (m ErrorMatcher) ByField() ErrorMatcher {
	m.matchField = true
	return m
}
||||
|
||||
// ByValue returns a derived ErrorMatcher which also matches by the errant
// value. The receiver is passed by value, so the original matcher is
// unmodified.
func (m ErrorMatcher) ByValue() ErrorMatcher {
	m.matchValue = true
	return m
}
||||
|
||||
// ByOrigin returns a derived ErrorMatcher which also matches by the origin.
// The receiver is passed by value, so the original matcher is unmodified.
func (m ErrorMatcher) ByOrigin() ErrorMatcher {
	m.matchOrigin = true
	return m
}
||||
|
||||
// RequireOriginWhenInvalid returns a derived ErrorMatcher which also requires
// the Origin field to be set when the Type is Invalid and the matcher is
// matching by Origin. The receiver is passed by value, so the original
// matcher is unmodified.
func (m ErrorMatcher) RequireOriginWhenInvalid() ErrorMatcher {
	m.requireOriginWhenInvalid = true
	return m
}
||||
|
||||
// ByDetailExact returns a derived ErrorMatcher which also matches errors by
// the exact detail string.
func (m ErrorMatcher) ByDetailExact() ErrorMatcher {
	// Exact string equality between the wanted and actual detail.
	m.matchDetail = func(want, got string) bool {
		return got == want
	}
	return m
}
||||
|
||||
// ByDetailSubstring returns a derived ErrorMatcher which also matches errors
// by a substring of the detail string.
func (m ErrorMatcher) ByDetailSubstring() ErrorMatcher {
	// The wanted detail must appear somewhere within the actual detail.
	m.matchDetail = func(want, got string) bool {
		return strings.Contains(got, want)
	}
	return m
}
||||
|
||||
// ByDetailRegexp returns a derived ErrorMatcher which also matches errors by a
// regular expression of the detail string, where the "want" string is assumed
// to be a valid regular expression.
func (m ErrorMatcher) ByDetailRegexp() ErrorMatcher {
	// NOTE: MustCompile panics if the "want" pattern is not a valid regexp.
	m.matchDetail = func(want, got string) bool {
		return regexp.MustCompile(want).MatchString(got)
	}
	return m
}
||||
|
||||
// TestIntf lets users pass a testing.T while not coupling this package to Go's
// testing package (*testing.T provides all three methods).
type TestIntf interface {
	Helper()
	Errorf(format string, args ...any)
	Logf(format string, args ...any)
}
||||
|
||||
// Test compares two ErrorLists by the criteria configured in this matcher, and
// fails the test if they don't match. If a given "want" error matches multiple
// "got" errors, they will all be consumed. This might be OK (e.g. if there are
// multiple errors on the same field from the same origin) or it might be an
// insufficiently specific matcher, so these will be logged.
func (m ErrorMatcher) Test(tb TestIntf, want, got ErrorList) {
	tb.Helper()

	remaining := got
	for _, w := range want {
		// Keep only the "got" errors this "want" does NOT match; matched
		// errors are consumed and not considered for later "want"s.
		tmp := make(ErrorList, 0, len(remaining))
		n := 0
		for _, g := range remaining {
			if m.Matches(w, g) {
				n++
			} else {
				tmp = append(tmp, g)
			}
		}
		if n == 0 {
			tb.Errorf("expected an error matching:\n%s", m.Render(w))
		} else if n > 1 {
			// This is not necessarily an error, but it's worth logging in
			// case it's not what the test author intended.
			tb.Logf("multiple errors matched:\n%s", m.Render(w))
		}
		remaining = tmp
	}
	// Anything left over was not matched by any "want"; render it with every
	// criterion enabled so the unexpected error is easy to identify.
	if len(remaining) > 0 {
		for _, e := range remaining {
			exactly := m.Exactly() // makes a copy
			tb.Errorf("unmatched error:\n%s", exactly.Render(e))
		}
	}
}
||||
@ -0,0 +1,278 @@ |
||||
/* |
||||
Copyright 2023 The Kubernetes Authors. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
package validation |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net" |
||||
"net/netip" |
||||
"slices" |
||||
|
||||
"k8s.io/apimachinery/pkg/util/validation/field" |
||||
"k8s.io/klog/v2" |
||||
netutils "k8s.io/utils/net" |
||||
) |
||||
|
||||
// parseIP parses value as an IP address, returning it along with any
// validation errors. It always accepts the "sloppy" legacy syntax; when
// strictValidation is set it additionally rejects leading 0s in IPv4 octets
// and IPv4-mapped IPv6 addresses.
func parseIP(fldPath *field.Path, value string, strictValidation bool) (net.IP, field.ErrorList) {
	var allErrors field.ErrorList

	ip := netutils.ParseIPSloppy(value)
	if ip == nil {
		allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"))
		return nil, allErrors
	}

	if strictValidation {
		addr, err := netip.ParseAddr(value)
		if err != nil {
			// If netutils.ParseIPSloppy parsed it, but netip.ParseAddr
			// doesn't, then it must have illegal leading 0s.
			allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s"))
		}
		// On parse error addr is the zero Addr, for which Is4In6 reports
		// false, so this check only fires for successfully parsed values.
		if addr.Is4In6() {
			allErrors = append(allErrors, field.Invalid(fldPath, value, "must not be an IPv4-mapped IPv6 address"))
		}
	}

	return ip, allErrors
}
||||
|
||||
// IsValidIPForLegacyField tests that the argument is a valid IP address for a "legacy"
|
||||
// API field that predates strict IP validation. In particular, this allows IPs that are
|
||||
// not in canonical form (e.g., "FE80:0:0:0:0:0:0:0abc" instead of "fe80::abc").
|
||||
//
|
||||
// If strictValidation is false, this also allows IPs in certain invalid or ambiguous
|
||||
// formats:
|
||||
//
|
||||
// 1. IPv4 IPs are allowed to have leading "0"s in octets (e.g. "010.002.003.004").
|
||||
// Historically, net.ParseIP (and later netutils.ParseIPSloppy) simply ignored leading
|
||||
// "0"s in IPv4 addresses, but most libc-based software treats 0-prefixed IPv4 octets
|
||||
// as octal, meaning different software might interpret the same string as two
|
||||
// different IPs, potentially leading to security issues. (Current net.ParseIP and
|
||||
// netip.ParseAddr simply reject inputs with leading "0"s.)
|
||||
//
|
||||
// 2. IPv4-mapped IPv6 IPs (e.g. "::ffff:1.2.3.4") are allowed. These can also lead to
|
||||
// different software interpreting the value in different ways, because they may be
|
||||
// treated as IPv4 by some software and IPv6 by other software. (net.ParseIP and
|
||||
// netip.ParseAddr both allow these, but there are no use cases for representing IPv4
|
||||
// addresses as IPv4-mapped IPv6 addresses in Kubernetes.)
|
||||
//
|
||||
// Alternatively, when validating an update to an existing field, you can pass a list of
|
||||
// IP values from the old object that should be accepted if they appear in the new object
|
||||
// even if they are not valid.
|
||||
//
|
||||
// This function should only be used to validate the existing fields that were
|
||||
// historically validated in this way, and strictValidation should be true unless the
|
||||
// StrictIPCIDRValidation feature gate is disabled. Use IsValidIP for parsing new fields.
|
||||
func IsValidIPForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldIPs []string) field.ErrorList { |
||||
if slices.Contains(validOldIPs, value) { |
||||
return nil |
||||
} |
||||
_, allErrors := parseIP(fldPath, value, strictValidation) |
||||
return allErrors.WithOrigin("format=ip-sloppy") |
||||
} |
||||
|
||||
// IsValidIP tests that the argument is a valid IP address, according to current
|
||||
// Kubernetes standards for IP address validation.
|
||||
func IsValidIP(fldPath *field.Path, value string) field.ErrorList { |
||||
ip, allErrors := parseIP(fldPath, value, true) |
||||
if len(allErrors) != 0 { |
||||
return allErrors.WithOrigin("format=ip-strict") |
||||
} |
||||
|
||||
if value != ip.String() { |
||||
allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ip.String()))) |
||||
} |
||||
return allErrors.WithOrigin("format=ip-strict") |
||||
} |
||||
|
||||
// GetWarningsForIP returns warnings for IP address values in non-standard forms. This
// should only be used with fields that are validated with IsValidIPForLegacyField().
func GetWarningsForIP(fldPath *field.Path, value string) []string {
	ip := netutils.ParseIPSloppy(value)
	if ip == nil {
		// Callers are expected to have validated value already; reaching this
		// point indicates a programming error, not a user error.
		klog.ErrorS(nil, "GetWarningsForIP called on value that was not validated with IsValidIPForLegacyField", "field", fldPath, "value", value)
		return nil
	}

	addr, _ := netip.ParseAddr(value)
	if !addr.IsValid() || addr.Is4In6() {
		// This catches 2 cases: leading 0s (if ParseIPSloppy() accepted it but
		// ParseAddr() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way,
		// re-stringifying the net.IP value will give the preferred form.
		return []string{
			fmt.Sprintf("%s: non-standard IP address %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ip.String()),
		}
	}

	// If ParseIPSloppy() and ParseAddr() both accept it then it's fully valid, though
	// it may be non-canonical.
	if addr.Is6() && addr.String() != value {
		return []string{
			fmt.Sprintf("%s: IPv6 address %q should be in RFC 5952 canonical format (%q)", fldPath, value, addr.String()),
		}
	}

	return nil
}
||||
|
||||
// parseCIDR parses value as a CIDR, returning the masked subnet along with any
// validation errors. It always accepts the "sloppy" legacy syntax; when
// strictValidation is set it additionally rejects leading 0s, IPv4-mapped
// IPv6 addresses, and bits set beyond the prefix length.
func parseCIDR(fldPath *field.Path, value string, strictValidation bool) (*net.IPNet, field.ErrorList) {
	var allErrors field.ErrorList

	_, ipnet, err := netutils.ParseCIDRSloppy(value)
	if err != nil {
		allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)"))
		return nil, allErrors
	}

	if strictValidation {
		prefix, err := netip.ParsePrefix(value)
		if err != nil {
			// If netutils.ParseCIDRSloppy parsed it, but netip.ParsePrefix
			// doesn't, then it must have illegal leading 0s (either in the
			// IP part or the prefix).
			allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s in IP or prefix length"))
		} else if prefix.Addr().Is4In6() {
			allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have an IPv4-mapped IPv6 address"))
		} else if prefix.Addr() != prefix.Masked().Addr() {
			// "Interface address" form (e.g. 10.0.0.1/24) is rejected here;
			// use IsValidInterfaceAddress for that style of value.
			allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have bits set beyond the prefix length"))
		}
	}

	return ipnet, allErrors
}
||||
|
||||
// IsValidCIDRForLegacyField tests that the argument is a valid CIDR value for a "legacy"
|
||||
// API field that predates strict IP validation. In particular, this allows IPs that are
|
||||
// not in canonical form (e.g., "FE80:0abc:0:0:0:0:0:0/64" instead of "fe80:abc::/64").
|
||||
//
|
||||
// If strictValidation is false, this also allows CIDR values in certain invalid or
|
||||
// ambiguous formats:
|
||||
//
|
||||
// 1. The IP part of the CIDR value is parsed as with IsValidIPForLegacyField with
|
||||
// strictValidation=false.
|
||||
//
|
||||
// 2. The CIDR value is allowed to be either a "subnet"/"mask" (with the lower bits after
|
||||
// the prefix length all being 0), or an "interface address" as with `ip addr` (with a
|
||||
// complete IP address and associated subnet length). With strict validation, the
|
||||
// value is required to be in "subnet"/"mask" form.
|
||||
//
|
||||
// 3. The prefix length is allowed to have leading 0s.
|
||||
//
|
||||
// Alternatively, when validating an update to an existing field, you can pass a list of
|
||||
// CIDR values from the old object that should be accepted if they appear in the new
|
||||
// object even if they are not valid.
|
||||
//
|
||||
// This function should only be used to validate the existing fields that were
|
||||
// historically validated in this way, and strictValidation should be true unless the
|
||||
// StrictIPCIDRValidation feature gate is disabled. Use IsValidCIDR or
|
||||
// IsValidInterfaceAddress for parsing new fields.
|
||||
func IsValidCIDRForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldCIDRs []string) field.ErrorList { |
||||
if slices.Contains(validOldCIDRs, value) { |
||||
return nil |
||||
} |
||||
|
||||
_, allErrors := parseCIDR(fldPath, value, strictValidation) |
||||
return allErrors |
||||
} |
||||
|
||||
// IsValidCIDR tests that the argument is a valid CIDR value, according to current
|
||||
// Kubernetes standards for CIDR validation. This function is only for
|
||||
// "subnet"/"mask"-style CIDR values (e.g., "192.168.1.0/24", with no bits set beyond the
|
||||
// prefix length). Use IsValidInterfaceAddress for "ifaddr"-style CIDR values.
|
||||
func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList { |
||||
ipnet, allErrors := parseCIDR(fldPath, value, true) |
||||
if len(allErrors) != 0 { |
||||
return allErrors |
||||
} |
||||
|
||||
if value != ipnet.String() { |
||||
allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ipnet.String()))) |
||||
} |
||||
return allErrors |
||||
} |
||||
|
||||
// GetWarningsForCIDR returns warnings for CIDR values in non-standard forms. This should
// only be used with fields that are validated with IsValidCIDRForLegacyField().
func GetWarningsForCIDR(fldPath *field.Path, value string) []string {
	ip, ipnet, err := netutils.ParseCIDRSloppy(value)
	if err != nil {
		// Callers are expected to have validated value already; reaching this
		// point indicates a programming error, not a user error.
		klog.ErrorS(err, "GetWarningsForCIDR called on value that was not validated with IsValidCIDRForLegacyField", "field", fldPath, "value", value)
		return nil
	}

	var warnings []string

	// Check for bits set after prefix length
	if !ip.Equal(ipnet.IP) {
		_, addrlen := ipnet.Mask.Size()
		singleIPCIDR := fmt.Sprintf("%s/%d", ip.String(), addrlen)
		warnings = append(warnings,
			fmt.Sprintf("%s: CIDR value %q is ambiguous in this context (should be %q or %q?)", fldPath, value, ipnet.String(), singleIPCIDR),
		)
	}

	// On parse error prefix is the zero Prefix, so IsValid() below is false.
	prefix, _ := netip.ParsePrefix(value)
	addr := prefix.Addr()
	if !prefix.IsValid() || addr.Is4In6() {
		// This catches 2 cases: leading 0s (if ParseCIDRSloppy() accepted it but
		// ParsePrefix() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way,
		// re-stringifying the net.IPNet value will give the preferred form.
		warnings = append(warnings,
			fmt.Sprintf("%s: non-standard CIDR value %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ipnet.String()),
		)
	}

	// If ParseCIDRSloppy() and ParsePrefix() both accept it then it's fully valid,
	// though it may be non-canonical. But only check this if there are no other
	// warnings, since either of the other warnings would also cause a round-trip
	// failure.
	if len(warnings) == 0 && addr.Is6() && prefix.String() != value {
		warnings = append(warnings,
			fmt.Sprintf("%s: IPv6 CIDR value %q should be in RFC 5952 canonical format (%q)", fldPath, value, prefix.String()),
		)
	}

	return warnings
}
||||
|
||||
// IsValidInterfaceAddress tests that the argument is a valid "ifaddr"-style CIDR value in
// canonical form (e.g., "192.168.1.5/24", with a complete IP address and associated
// subnet length). Use IsValidCIDR for "subnet"/"mask"-style CIDR values (e.g.,
// "192.168.1.0/24").
func IsValidInterfaceAddress(fldPath *field.Path, value string) field.ErrorList {
	var allErrors field.ErrorList
	ip, ipnet, err := netutils.ParseCIDRSloppy(value)
	if err != nil {
		allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid address in CIDR form, (e.g. 10.9.8.7/24 or 2001:db8::1/64)"))
		return allErrors
	}

	// The canonical form of `value` is not `ipnet.String()`, because `ipnet` doesn't
	// include the bits after the prefix. We need to construct the canonical form
	// ourselves from `ip` and `ipnet.Mask`.
	maskSize, _ := ipnet.Mask.Size()
	if netutils.IsIPv4(ip) && maskSize > net.IPv4len*8 {
		// "::ffff:192.168.0.1/120" -> "192.168.0.1/24"
		maskSize -= (net.IPv6len - net.IPv4len) * 8
	}
	canonical := fmt.Sprintf("%s/%d", ip.String(), maskSize)
	if value != canonical {
		allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", canonical)))
	}
	return allErrors
}
||||
@ -0,0 +1,130 @@ |
||||
/* |
||||
Copyright 2025 The Kubernetes Authors. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
package yaml |
||||
|
||||
import "io" |
||||
|
||||
// StreamReader is a reader designed for consuming streams of variable-length
|
||||
// messages. It buffers data until it is explicitly consumed, and can be
|
||||
// rewound to re-read previous data.
|
||||
type StreamReader struct { |
||||
r io.Reader |
||||
buf []byte |
||||
head int // current read offset into buf
|
||||
ttlConsumed int // number of bytes which have been consumed
|
||||
} |
||||
|
||||
// NewStreamReader creates a new StreamReader wrapping the provided
|
||||
// io.Reader.
|
||||
func NewStreamReader(r io.Reader, size int) *StreamReader { |
||||
if size == 0 { |
||||
size = 4096 |
||||
} |
||||
return &StreamReader{ |
||||
r: r, |
||||
buf: make([]byte, 0, size), // Start with a reasonable capacity
|
||||
} |
||||
} |
||||
|
||||
// Read implements io.Reader. It first returns any buffered data after the
|
||||
// current offset, and if that's exhausted, reads from the underlying reader
|
||||
// and buffers the data. The returned data is not considered consumed until the
|
||||
// Consume method is called.
|
||||
func (r *StreamReader) Read(p []byte) (n int, err error) { |
||||
// If we have buffered data, return it
|
||||
if r.head < len(r.buf) { |
||||
n = copy(p, r.buf[r.head:]) |
||||
r.head += n |
||||
return n, nil |
||||
} |
||||
|
||||
// If we've already hit EOF, return it
|
||||
if r.r == nil { |
||||
return 0, io.EOF |
||||
} |
||||
|
||||
// Read from the underlying reader
|
||||
n, err = r.r.Read(p) |
||||
if n > 0 { |
||||
r.buf = append(r.buf, p[:n]...) |
||||
r.head += n |
||||
} |
||||
if err == nil { |
||||
return n, nil |
||||
} |
||||
if err == io.EOF { |
||||
// Store that we've hit EOF by setting r to nil
|
||||
r.r = nil |
||||
} |
||||
return n, err |
||||
} |
||||
|
||||
// ReadN reads exactly n bytes from the reader, blocking until all bytes are
|
||||
// read or an error occurs. If an error occurs, the number of bytes read is
|
||||
// returned along with the error. If EOF is hit before n bytes are read, this
|
||||
// will return the bytes read so far, along with io.EOF. The returned data is
|
||||
// not considered consumed until the Consume method is called.
|
||||
func (r *StreamReader) ReadN(want int) ([]byte, error) { |
||||
ret := make([]byte, want) |
||||
off := 0 |
||||
for off < want { |
||||
n, err := r.Read(ret[off:]) |
||||
if err != nil { |
||||
return ret[:off+n], err |
||||
} |
||||
off += n |
||||
} |
||||
return ret, nil |
||||
} |
||||
|
||||
// Peek returns the next n bytes without advancing the reader. The returned
|
||||
// bytes are valid until the next call to Consume.
|
||||
func (r *StreamReader) Peek(n int) ([]byte, error) { |
||||
buf, err := r.ReadN(n) |
||||
r.RewindN(len(buf)) |
||||
if err != nil { |
||||
return buf, err |
||||
} |
||||
return buf, nil |
||||
} |
||||
|
||||
// Rewind resets the reader to the beginning of the buffered data, so that
// previously read (but not yet consumed) bytes are returned again by Read.
func (r *StreamReader) Rewind() {
	r.head = 0
}
||||
|
||||
// RewindN rewinds the reader by n bytes. If n is greater than the current
|
||||
// buffer, the reader is rewound to the beginning of the buffer.
|
||||
func (r *StreamReader) RewindN(n int) { |
||||
r.head -= min(n, r.head) |
||||
} |
||||
|
||||
// Consume discards up to n bytes of previously read data from the beginning of
|
||||
// the buffer. Once consumed, that data is no longer available for rewinding.
|
||||
// If n is greater than the current buffer, the buffer is cleared. Consume
|
||||
// never consume data from the underlying reader.
|
||||
func (r *StreamReader) Consume(n int) { |
||||
n = min(n, len(r.buf)) |
||||
r.buf = r.buf[n:] |
||||
r.head -= n |
||||
r.ttlConsumed += n |
||||
} |
||||
|
||||
// Consumed returns the total number of bytes consumed from the input reader,
// i.e. the running sum of all Consume calls since the reader was created.
func (r *StreamReader) Consumed() int {
	return r.ttlConsumed
}
||||
@ -0,0 +1,43 @@ |
||||
# Contributing Guidelines |
||||
|
||||
Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: |
||||
|
||||
_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ |
||||
|
||||
## Getting Started |
||||
|
||||
We have full documentation on how to get started contributing here: |
||||
|
||||
<!--- |
||||
If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources |
||||
--> |
||||
|
||||
- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests |
||||
- [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](https://git.k8s.io/community/contributors/guide#contributing) |
||||
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers |
||||
|
||||
## Mentorship |
||||
|
||||
- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! |
||||
|
||||
<!--- |
||||
Custom Information - if you're copying this template for the first time you can add custom content here, for example: |
||||
|
||||
## Contact Information |
||||
|
||||
- [Slack channel](https://kubernetes.slack.com/messages/kubernetes-users) - Replace `kubernetes-users` with your slack channel string, this will send users directly to your channel. |
||||
- [Mailing list](URL) |
||||
|
||||
--> |
||||
|
||||
## Project Management |
||||
|
||||
The [maintainers](https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES#L12) of this project (and often others who have official positions on the [contributor ladder](https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES)) are responsible for performing project management which oversees development and maintenance of the API, tests, tools, etc. While we try to be generally flexible when it comes to the management of individual pieces (such as Issues or PRs), we have some rules and guidelines which help us plan, coordinate and reduce waste. In this section you'll find some rules/guidelines for contributors related to project management which may extend or go beyond what you would find in the standard [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide). |
||||
|
||||
### Bumping stale and closed Issues & PRs |
||||
|
||||
Maintainers are ultimately responsible for triaging new issues and PRs, accepting or declining them, deciding priority and fitting them into milestones intended for future releases. Bots are responsible for marking issues and PRs which stagnate as stale, or closing them if progress does not continue for a long period of time. Due to the nature of this community-driven development effort (we do not have dedicated engineering resources, we rely on the community which is effectively "volunteer time") **not all issues can be accepted, prioritized or completed**. |
||||
|
||||
You may find times when an issue you're subscribed to and interested in seems to stagnate, or perhaps gets auto-closed. Prior to bumping or directly re-opening issues yourself, we generally ask that you bring these up for discussion on the agenda for one of our community syncs if possible, or bring them up for discussion in Slack or the mailing list as this gives us a better opportunity to discuss the issue and determine viability and logistics. If feasible we **highly recommend being ready to contribute directly** to any stale or unprioritized effort that you want to see move forward, as **the best way to ensure progress is to engage with the community and personally invest time**. |
||||
|
||||
We (the community) aren't opposed to making exceptions in some cases, but when in doubt please follow the above guidelines before bumping closed or stale issues if you're not ready to personally invest time in them. We are responsible for managing these and without further context or engagement we may set these back to how they were previously organized. |
||||
@ -0,0 +1,24 @@ |
||||
When donating the randfill project to the CNCF, we could not reach all the |
||||
gofuzz contributors to sign the CNCF CLA. As such, according to the CNCF rules |
||||
to donate a repository, we must add a NOTICE referencing section 7 of the CLA |
||||
with a list of developers who could not be reached. |
||||
|
||||
`7. Should You wish to submit work that is not Your original creation, You may |
||||
submit it to the Foundation separately from any Contribution, identifying the |
||||
complete details of its source and of any license or other restriction |
||||
(including, but not limited to, related patents, trademarks, and license |
||||
agreements) of which you are personally aware, and conspicuously marking the |
||||
work as "Submitted on behalf of a third-party: [named here]".` |
||||
|
||||
Submitted on behalf of a third-party: @dnephin (Daniel Nephin) |
||||
Submitted on behalf of a third-party: @AlekSi (Alexey Palazhchenko) |
||||
Submitted on behalf of a third-party: @bbigras (Bruno Bigras) |
||||
Submitted on behalf of a third-party: @samirkut (Samir) |
||||
Submitted on behalf of a third-party: @posener (Eyal Posener) |
||||
Submitted on behalf of a third-party: @Ashikpaul (Ashik Paul) |
||||
Submitted on behalf of a third-party: @kwongtailau (Kwongtai) |
||||
Submitted on behalf of a third-party: @ericcornelissen (Eric Cornelissen) |
||||
Submitted on behalf of a third-party: @eclipseo (Robert-André Mauchin) |
||||
Submitted on behalf of a third-party: @yanzhoupan (Andrew Pan) |
||||
Submitted on behalf of a third-party: @STRRL (Zhiqiang ZHOU) |
||||
Submitted on behalf of a third-party: @disconnect3d (Disconnect3d) |
||||
@ -0,0 +1,8 @@ |
||||
# See the OWNERS docs at https://go.k8s.io/owners |
||||
# See the OWNERS_ALIASES file at https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES for a list of members for each alias. |
||||
|
||||
approvers: |
||||
- sig-testing-leads |
||||
- thockin |
||||
|
||||
reviewers: [] |
||||
@ -0,0 +1,14 @@ |
||||
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md |
||||
# This file should be kept in sync with k/org. |
||||
|
||||
aliases: |
||||
# Reference: https://github.com/kubernetes/org/blob/main/OWNERS_ALIASES |
||||
sig-testing-leads: |
||||
- BenTheElder |
||||
- alvaroaleman |
||||
- aojea |
||||
- cjwagner |
||||
- jbpratt |
||||
- michelle192837 |
||||
- pohly |
||||
- xmcqueen |
||||
45
vendor/github.com/google/gofuzz/README.md → vendor/sigs.k8s.io/randfill/README.md
generated
vendored
45
vendor/github.com/google/gofuzz/README.md → vendor/sigs.k8s.io/randfill/README.md
generated
vendored
@ -0,0 +1,16 @@ |
||||
# Defined below are the security contacts for this repo. |
||||
# |
||||
# They are the contact point for the Product Security Committee to reach out |
||||
# to for triaging and handling of incoming issues. |
||||
# |
||||
# The below names agree to abide by the |
||||
# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) |
||||
# and will be removed and replaced if they violate that agreement. |
||||
# |
||||
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE |
||||
# INSTRUCTIONS AT https://kubernetes.io/security/ |
||||
|
||||
thockin |
||||
BenTheElder |
||||
aojea |
||||
pohly |
||||
@ -0,0 +1,3 @@ |
||||
# Kubernetes Community Code of Conduct |
||||
|
||||
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) |
||||
@ -0,0 +1,682 @@ |
||||
/* |
||||
Copyright 2014 Google Inc. All rights reserved. |
||||
Copyright 2014 The gofuzz Authors. |
||||
Copyright 2025 The Kubernetes Authors. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
// Package randfill is a library for populating go objects with random values.
|
||||
package randfill |
||||
|
||||
import ( |
||||
"fmt" |
||||
"math/rand" |
||||
"reflect" |
||||
"regexp" |
||||
"sync" |
||||
"time" |
||||
"unsafe" |
||||
|
||||
"strings" |
||||
|
||||
"sigs.k8s.io/randfill/bytesource" |
||||
) |
||||
|
||||
// funcMap is a map from a type to a function that randfills that type. The
// function is stored as a reflect.Value (rather than a typed func) because the
// type being filled is different for each entry.
type funcMap map[reflect.Type]reflect.Value
||||
|
||||
// Filler knows how to fill any object with random fields.
type Filler struct {
	customFuncs           funcMap          // user-registered fill funcs (see Funcs)
	defaultFuncs          funcMap          // package-provided fill funcs (e.g. for time.Time)
	r                     *rand.Rand       // source of randomness for this Filler
	nilChance             float64          // probability of producing a nil pointer/map/slice
	minElements           int              // lower bound on generated map/slice lengths
	maxElements           int              // upper bound on generated map/slice lengths
	maxDepth              int              // recursion limit for nested fills
	allowUnexportedFields bool             // when true, unexported fields are filled too
	skipFieldPatterns     []*regexp.Regexp // struct fields matching any pattern are skipped

	lock sync.Mutex // serializes Fill/FillNoCustom calls on this Filler
}
||||
|
||||
// New returns a new Filler seeded with the current time. Customize your Filler
// further by calling Funcs, RandSource, NilChance, or NumElements in any order.
func New() *Filler {
	return NewWithSeed(time.Now().UnixNano())
}
||||
|
||||
func NewWithSeed(seed int64) *Filler { |
||||
f := &Filler{ |
||||
defaultFuncs: funcMap{ |
||||
reflect.TypeOf(&time.Time{}): reflect.ValueOf(randfillTime), |
||||
}, |
||||
|
||||
customFuncs: funcMap{}, |
||||
r: rand.New(rand.NewSource(seed)), |
||||
nilChance: .2, |
||||
minElements: 1, |
||||
maxElements: 10, |
||||
maxDepth: 100, |
||||
allowUnexportedFields: false, |
||||
} |
||||
return f |
||||
} |
||||
|
||||
// NewFromGoFuzz is a helper function that enables using randfill (this
// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
// fuzzing. Essentially, it enables translating the fuzzing bytes from
// go-fuzz to any Go object using this library.
//
// This implementation promises a constant translation from a given slice of
// bytes to the fuzzed objects. This promise will remain over future
// versions of Go and of this library.
//
// Note: the returned Filler should not be shared between multiple goroutines,
// as its deterministic output will no longer be available.
//
// Example: use go-fuzz to test the function `MyFunc(int)` in the package
// `mypackage`. Add the file: "mypackage_fuzz.go" with the content:
//
//	// +build gofuzz
//	package mypackage
//	import "sigs.k8s.io/randfill"
//
//	func Fuzz(data []byte) int {
//		var i int
//		randfill.NewFromGoFuzz(data).Fill(&i)
//		MyFunc(i)
//		return 0
//	}
func NewFromGoFuzz(data []byte) *Filler {
	// bytesource replays the fuzzer-provided bytes as the rand.Source,
	// which is what makes the translation deterministic.
	return New().RandSource(bytesource.New(data))
}
||||
|
||||
// Funcs registers custom fill functions for this Filler.
|
||||
//
|
||||
// Each entry in customFuncs must be a function taking two parameters.
|
||||
// The first parameter must be a pointer or map. It is the variable that
|
||||
// function will fill with random data. The second parameter must be a
|
||||
// randfill.Continue, which will provide a source of randomness and a way
|
||||
// to automatically continue filling smaller pieces of the first parameter.
|
||||
//
|
||||
// These functions are called sensibly, e.g., if you wanted custom string
|
||||
// filling, the function `func(s *string, c randfill.Continue)` would get
|
||||
// called and passed the address of strings. Maps and pointers will always
|
||||
// be made/new'd for you, ignoring the NilChance option. For slices, it
|
||||
// doesn't make much sense to pre-create them--Filler doesn't know how
|
||||
// long you want your slice--so take a pointer to a slice, and make it
|
||||
// yourself. (If you don't want your map/pointer type pre-made, take a
|
||||
// pointer to it, and make it yourself.) See the examples for a range of
|
||||
// custom functions.
|
||||
//
|
||||
// If a function is already registered for a type, and a new function is
|
||||
// provided, the previous function will be replaced with the new one.
|
||||
func (f *Filler) Funcs(customFuncs ...interface{}) *Filler { |
||||
for i := range customFuncs { |
||||
v := reflect.ValueOf(customFuncs[i]) |
||||
if v.Kind() != reflect.Func { |
||||
panic("Filler.Funcs: all arguments must be functions") |
||||
} |
||||
t := v.Type() |
||||
if t.NumIn() != 2 || t.NumOut() != 0 { |
||||
panic("Filler.Funcs: all customFuncs must have 2 arguments and 0 returns") |
||||
} |
||||
argT := t.In(0) |
||||
switch argT.Kind() { |
||||
case reflect.Ptr, reflect.Map: |
||||
default: |
||||
panic("Filler.Funcs: customFuncs' first argument must be a pointer or map type") |
||||
} |
||||
if t.In(1) != reflect.TypeOf(Continue{}) { |
||||
panic("Filler.Funcs: customFuncs' second argument must be a randfill.Continue") |
||||
} |
||||
f.customFuncs[argT] = v |
||||
} |
||||
return f |
||||
} |
||||
|
||||
// RandSource causes this Filler to get values from the given source of
// randomness. Use this if you want deterministic filling.
func (f *Filler) RandSource(s rand.Source) *Filler {
	f.r = rand.New(s)
	return f
}
||||
|
||||
// NilChance sets the probability of creating a nil pointer, map, or slice to
|
||||
// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
|
||||
func (f *Filler) NilChance(p float64) *Filler { |
||||
if p < 0 || p > 1 { |
||||
panic("Filler.NilChance: p must be between 0 and 1, inclusive") |
||||
} |
||||
f.nilChance = p |
||||
return f |
||||
} |
||||
|
||||
// NumElements sets the minimum and maximum number of elements that will be
|
||||
// added to a non-nil map or slice.
|
||||
func (f *Filler) NumElements(min, max int) *Filler { |
||||
if min < 0 { |
||||
panic("Filler.NumElements: min must be >= 0") |
||||
} |
||||
if min > max { |
||||
panic("Filler.NumElements: min must be <= max") |
||||
} |
||||
f.minElements = min |
||||
f.maxElements = max |
||||
return f |
||||
} |
||||
|
||||
func (f *Filler) genElementCount() int { |
||||
if f.minElements == f.maxElements { |
||||
return f.minElements |
||||
} |
||||
return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) |
||||
} |
||||
|
||||
// genShouldFill reports whether a nilable value should be populated rather
// than left nil, based on the configured nilChance.
func (f *Filler) genShouldFill() bool {
	return f.r.Float64() >= f.nilChance
}
||||
|
||||
// MaxDepth sets the maximum number of recursive fill calls that will be made
// before stopping. This includes struct members, pointers, and map and slice
// elements.
func (f *Filler) MaxDepth(d int) *Filler {
	f.maxDepth = d
	return f
}
||||
|
||||
// AllowUnexportedFields defines whether to fill unexported (private) struct
// fields. When enabled, doFill writes them via the unsafe package.
func (f *Filler) AllowUnexportedFields(flag bool) *Filler {
	f.allowUnexportedFields = flag
	return f
}
||||
|
||||
// SkipFieldsWithPattern tells this Filler to skip any struct field whose name
// matches the supplied pattern. Call this multiple times if needed. This is
// useful to skip XXX_ fields generated by protobuf.
func (f *Filler) SkipFieldsWithPattern(pattern *regexp.Regexp) *Filler {
	f.skipFieldPatterns = append(f.skipFieldPatterns, pattern)
	return f
}
||||
|
||||
// SimpleSelfFiller represents an object that knows how to randfill itself.
//
// Unlike NativeSelfFiller, this interface does not cause the type in question
// to depend on the randfill package. This is most useful for simple types. For
// more complex types, consider using NativeSelfFiller.
type SimpleSelfFiller interface {
	// RandFill fills the current object with random data from r.
	RandFill(r *rand.Rand)
}
||||
|
||||
// NativeSelfFiller represents an object that knows how to randfill itself.
//
// Unlike SimpleSelfFiller, this interface allows for recursive filling of
// child objects with the same rules as the parent Filler (via the Continue).
type NativeSelfFiller interface {
	// RandFill fills the current object with random data.
	RandFill(c Continue)
}
||||
|
||||
// Fill recursively fills all of obj's fields with something random. First
|
||||
// this tries to find a custom fill function (see Funcs). If there is no
|
||||
// custom function, this tests whether the object implements SimpleSelfFiller
|
||||
// or NativeSelfFiller and if so, calls RandFill on it to fill itself. If that
|
||||
// fails, this will see if there is a default fill function provided by this
|
||||
// package. If all of that fails, this will generate random values for all
|
||||
// primitive fields and then recurse for all non-primitives.
|
||||
//
|
||||
// This is safe for cyclic or tree-like structs, up to a limit. Use the
|
||||
// MaxDepth method to adjust how deep you need it to recurse.
|
||||
//
|
||||
// obj must be a pointer. Exported (public) fields can always be set, and if
|
||||
// the AllowUnexportedFields() modifier was called it can try to set unexported
|
||||
// (private) fields, too.
|
||||
//
|
||||
// This is intended for tests, so will panic on bad input or unimplemented
|
||||
// types. This method takes a lock for the whole Filler, so it is not
|
||||
// reentrant. See Continue.
|
||||
func (f *Filler) Fill(obj interface{}) { |
||||
f.lock.Lock() |
||||
defer f.lock.Unlock() |
||||
|
||||
v := reflect.ValueOf(obj) |
||||
if v.Kind() != reflect.Ptr { |
||||
panic("Filler.Fill: obj must be a pointer") |
||||
} |
||||
v = v.Elem() |
||||
f.fillWithContext(v, 0) |
||||
} |
||||
|
||||
// FillNoCustom is just like Fill, except that any custom fill function for
|
||||
// obj's type will not be called and obj will not be tested for
|
||||
// SimpleSelfFiller or NativeSelfFiller. This applies only to obj and not other
|
||||
// instances of obj's type or to obj's child fields.
|
||||
//
|
||||
// obj must be a pointer. Exported (public) fields can always be set, and if
|
||||
// the AllowUnexportedFields() modifier was called it can try to set unexported
|
||||
// (private) fields, too.
|
||||
//
|
||||
// This is intended for tests, so will panic on bad input or unimplemented
|
||||
// types. This method takes a lock for the whole Filler, so it is not
|
||||
// reentrant. See Continue.
|
||||
func (f *Filler) FillNoCustom(obj interface{}) { |
||||
f.lock.Lock() |
||||
defer f.lock.Unlock() |
||||
|
||||
v := reflect.ValueOf(obj) |
||||
if v.Kind() != reflect.Ptr { |
||||
panic("Filler.FillNoCustom: obj must be a pointer") |
||||
} |
||||
v = v.Elem() |
||||
f.fillWithContext(v, flagNoCustomFill) |
||||
} |
||||
|
||||
// Flags accepted by fillWithContext/doFill.
const (
	// Do not try to find a custom fill function. Does not apply recursively.
	flagNoCustomFill uint64 = 1 << iota
)
||||
|
||||
// fillWithContext starts a fresh fill run for v at recursion depth zero.
func (f *Filler) fillWithContext(v reflect.Value, flags uint64) {
	fc := &fillerContext{filler: f}
	fc.doFill(v, flags)
}
||||
|
||||
// fillerContext carries context about a single filling run, which lets Filler
// be thread-safe.
type fillerContext struct {
	filler   *Filler
	curDepth int // current recursion depth, compared against filler.maxDepth
}
||||
|
||||
// doFill fills v with random data, recursing into composite kinds. flags is a
// bitmask (see flagNoCustomFill). Recursion stops silently once the Filler's
// maxDepth is reached; unsupported kinds (chan, func, interface) panic.
func (fc *fillerContext) doFill(v reflect.Value, flags uint64) {
	if fc.curDepth >= fc.filler.maxDepth {
		return
	}
	fc.curDepth++
	defer func() { fc.curDepth-- }()

	if !v.CanSet() {
		// Unexported fields are only writable when explicitly allowed, via an
		// unsafe re-derivation of the (addressable) value.
		if !fc.filler.allowUnexportedFields || !v.CanAddr() {
			return
		}
		v = reflect.NewAt(v.Type(), unsafe.Pointer(v.UnsafeAddr())).Elem()
	}

	if flags&flagNoCustomFill == 0 {
		// Check for both pointer and non-pointer custom functions.
		if v.CanAddr() && fc.tryCustom(v.Addr()) {
			return
		}
		if fc.tryCustom(v) {
			return
		}
	}

	// Primitive kinds are handled by the fill-function table.
	if fn, ok := fillFuncMap[v.Kind()]; ok {
		fn(v, fc.filler.r)
		return
	}

	switch v.Kind() {
	case reflect.Map:
		if fc.filler.genShouldFill() {
			v.Set(reflect.MakeMap(v.Type()))
			n := fc.filler.genElementCount()
			for i := 0; i < n; i++ {
				key := reflect.New(v.Type().Key()).Elem()
				fc.doFill(key, 0)
				val := reflect.New(v.Type().Elem()).Elem()
				fc.doFill(val, 0)
				v.SetMapIndex(key, val)
			}
			return
		}
		// nilChance triggered: leave the map as its zero (nil) value.
		v.Set(reflect.Zero(v.Type()))
	case reflect.Ptr:
		if fc.filler.genShouldFill() {
			v.Set(reflect.New(v.Type().Elem()))
			fc.doFill(v.Elem(), 0)
			return
		}
		v.Set(reflect.Zero(v.Type()))
	case reflect.Slice:
		if fc.filler.genShouldFill() {
			n := fc.filler.genElementCount()
			v.Set(reflect.MakeSlice(v.Type(), n, n))
			for i := 0; i < n; i++ {
				fc.doFill(v.Index(i), 0)
			}
			return
		}
		v.Set(reflect.Zero(v.Type()))
	case reflect.Array:
		// Arrays have a fixed length, so nilChance either fills every element
		// or zeroes the whole array.
		if fc.filler.genShouldFill() {
			n := v.Len()
			for i := 0; i < n; i++ {
				fc.doFill(v.Index(i), 0)
			}
			return
		}
		v.Set(reflect.Zero(v.Type()))
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			skipField := false
			fieldName := v.Type().Field(i).Name
			for _, pattern := range fc.filler.skipFieldPatterns {
				if pattern.MatchString(fieldName) {
					skipField = true
					break
				}
			}
			if !skipField {
				fc.doFill(v.Field(i), 0)
			}
		}
	case reflect.Chan:
		fallthrough
	case reflect.Func:
		fallthrough
	case reflect.Interface:
		fallthrough
	default:
		// No generic way to conjure values of these kinds.
		panic(fmt.Sprintf("can't fill type %v, kind %v", v.Type(), v.Kind()))
	}
}
||||
|
||||
// tryCustom searches for custom handlers, and returns true iff it finds a match
// and successfully randomizes v.
//
// Lookup order: user-registered custom funcs, then the SimpleSelfFiller and
// NativeSelfFiller interfaces, then the package's default funcs.
func (fc *fillerContext) tryCustom(v reflect.Value) bool {
	// First: see if we have a fill function for it.
	doCustom, ok := fc.filler.customFuncs[v.Type()]
	if !ok {
		// Second: see if it can fill itself.
		if v.CanInterface() {
			intf := v.Interface()
			if fillable, ok := intf.(SimpleSelfFiller); ok {
				fillable.RandFill(fc.filler.r)
				return true
			}
			if fillable, ok := intf.(NativeSelfFiller); ok {
				fillable.RandFill(Continue{fc: fc, Rand: fc.filler.r})
				return true
			}
		}
		// Finally: see if there is a default fill function.
		doCustom, ok = fc.filler.defaultFuncs[v.Type()]
		if !ok {
			return false
		}
	}

	// Custom funcs receive a non-nil pointer or map; allocate one if needed.
	// Any other kind means v cannot be handed to a custom func at all.
	switch v.Kind() {
	case reflect.Ptr:
		if v.IsNil() {
			if !v.CanSet() {
				return false
			}
			v.Set(reflect.New(v.Type().Elem()))
		}
	case reflect.Map:
		if v.IsNil() {
			if !v.CanSet() {
				return false
			}
			v.Set(reflect.MakeMap(v.Type()))
		}
	default:
		return false
	}

	doCustom.Call([]reflect.Value{
		v,
		reflect.ValueOf(Continue{
			fc:   fc,
			Rand: fc.filler.r,
		}),
	})
	return true
}
||||
|
||||
// Continue can be passed to custom fill functions to allow them to use
// the correct source of randomness and to continue filling their members.
type Continue struct {
	fc *fillerContext // the in-progress fill run this Continue belongs to

	// For convenience, Continue implements rand.Rand via embedding.
	// Use this for generating any randomness if you want your filling
	// to be repeatable for a given seed.
	*rand.Rand
}
||||
|
||||
// Fill continues filling obj. obj must be a pointer or a reflect.Value of a
|
||||
// pointer. See Filler.Fill.
|
||||
func (c Continue) Fill(obj interface{}) { |
||||
v, ok := obj.(reflect.Value) |
||||
if !ok { |
||||
v = reflect.ValueOf(obj) |
||||
} |
||||
if v.Kind() != reflect.Ptr { |
||||
panic("Continue.Fill: obj must be a pointer") |
||||
} |
||||
v = v.Elem() |
||||
c.fc.doFill(v, 0) |
||||
} |
||||
|
||||
// FillNoCustom continues filling obj, except that any custom fill function for
|
||||
// obj's type will not be called and obj will not be tested for
|
||||
// SimpleSelfFiller or NativeSelfFiller. See Filler.FillNoCustom.
|
||||
func (c Continue) FillNoCustom(obj interface{}) { |
||||
v, ok := obj.(reflect.Value) |
||||
if !ok { |
||||
v = reflect.ValueOf(obj) |
||||
} |
||||
if v.Kind() != reflect.Ptr { |
||||
panic("Continue.FillNoCustom: obj must be a pointer") |
||||
} |
||||
v = v.Elem() |
||||
c.fc.doFill(v, flagNoCustomFill) |
||||
} |
||||
|
||||
// defaultStringMaxLen is the exclusive upper bound on generated string length
// when callers pass 0 as the max.
const defaultStringMaxLen = 20
||||
|
||||
// String makes a random string up to n characters long. If n is 0, the default
// size range is [0-20). The returned string may include a variety of (valid)
// UTF-8 encodings.
func (c Continue) String(n int) string {
	return randString(c.Rand, n)
}
||||
|
||||
// Uint64 makes random 64 bit numbers.
// Weirdly, rand doesn't have a function that gives you 64 random bits.
func (c Continue) Uint64() uint64 {
	return randUint64(c.Rand)
}
||||
|
||||
// Bool returns true or false randomly.
func (c Continue) Bool() bool {
	return randBool(c.Rand)
}
||||
|
||||
// fillInt sets v (any signed-integer kind) to a random value.
func fillInt(v reflect.Value, r *rand.Rand) {
	v.SetInt(int64(randUint64(r)))
}
||||
|
||||
// fillUint sets v (any unsigned-integer kind) to a random value.
func fillUint(v reflect.Value, r *rand.Rand) {
	v.SetUint(randUint64(r))
}
||||
|
||||
func randfillTime(t *time.Time, c Continue) { |
||||
var sec, nsec int64 |
||||
// Allow for about 1000 years of random time values, which keeps things
|
||||
// like JSON parsing reasonably happy.
|
||||
sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) |
||||
// Nanosecond values greater than 1Bn are technically allowed but result in
|
||||
// time.Time values with invalid timezone offsets.
|
||||
nsec = c.Rand.Int63n(999999999) |
||||
*t = time.Unix(sec, nsec) |
||||
} |
||||
|
||||
// fillFuncMap maps each primitive reflect.Kind to a function that sets a value
// of that kind to random data. Composite kinds are handled in doFill instead.
var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
	reflect.Bool: func(v reflect.Value, r *rand.Rand) {
		v.SetBool(randBool(r))
	},
	reflect.Int:     fillInt,
	reflect.Int8:    fillInt,
	reflect.Int16:   fillInt,
	reflect.Int32:   fillInt,
	reflect.Int64:   fillInt,
	reflect.Uint:    fillUint,
	reflect.Uint8:   fillUint,
	reflect.Uint16:  fillUint,
	reflect.Uint32:  fillUint,
	reflect.Uint64:  fillUint,
	reflect.Uintptr: fillUint,
	reflect.Float32: func(v reflect.Value, r *rand.Rand) {
		v.SetFloat(float64(r.Float32()))
	},
	reflect.Float64: func(v reflect.Value, r *rand.Rand) {
		v.SetFloat(r.Float64())
	},
	reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
		v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
	},
	reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
		v.SetComplex(complex(r.Float64(), r.Float64()))
	},
	reflect.String: func(v reflect.Value, r *rand.Rand) {
		v.SetString(randString(r, 0))
	},
	reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
		panic("filling of UnsafePointers is not implemented")
	},
}
||||
|
||||
// randBool returns true or false randomly.
|
||||
func randBool(r *rand.Rand) bool { |
||||
return r.Int31()&(1<<30) == 0 |
||||
} |
||||
|
||||
// int63nPicker is the subset of *rand.Rand needed by UnicodeRange.choose.
type int63nPicker interface {
	Int63n(int64) int64
}
||||
|
||||
// UnicodeRange describes a sequential range of unicode characters.
// Last must be numerically greater than First.
type UnicodeRange struct {
	First, Last rune // inclusive bounds of the range
}
||||
|
||||
// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
// To be useful, each range must have at least one character (First <= Last) and
// there must be at least one range.
type UnicodeRanges []UnicodeRange
||||
|
||||
// choose returns a random unicode character from the given range, using the
|
||||
// given randomness source.
|
||||
func (ur UnicodeRange) choose(r int63nPicker) rune { |
||||
count := int64(ur.Last - ur.First + 1) |
||||
return ur.First + rune(r.Int63n(count)) |
||||
} |
||||
|
||||
// CustomStringFillFunc constructs a FillFunc which produces random strings.
// Each character is selected from the range ur. If there are no characters
// in the range (ur.Last < ur.First), this will panic.
func (ur UnicodeRange) CustomStringFillFunc(n int) func(s *string, c Continue) {
	ur.check() // validate eagerly so misconfiguration fails at construction time
	return func(s *string, c Continue) {
		*s = ur.randString(c.Rand, n)
	}
}
||||
|
||||
// check is a function that used to check whether the first of ur(UnicodeRange)
|
||||
// is greater than the last one.
|
||||
func (ur UnicodeRange) check() { |
||||
if ur.Last < ur.First { |
||||
panic("UnicodeRange.check: the last encoding must be greater than the first") |
||||
} |
||||
} |
||||
|
||||
// randString of UnicodeRange makes a random string up to 20 characters long.
|
||||
// Each character is selected form ur(UnicodeRange).
|
||||
func (ur UnicodeRange) randString(r *rand.Rand, max int) string { |
||||
if max == 0 { |
||||
max = defaultStringMaxLen |
||||
} |
||||
n := r.Intn(max) |
||||
sb := strings.Builder{} |
||||
sb.Grow(n) |
||||
for i := 0; i < n; i++ { |
||||
sb.WriteRune(ur.choose(r)) |
||||
} |
||||
return sb.String() |
||||
} |
||||
|
||||
// defaultUnicodeRanges sets a default unicode range when users do not set
// CustomStringFillFunc() but want to fill strings.
var defaultUnicodeRanges = UnicodeRanges{
	{' ', '~'},           // ASCII characters
	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
}
||||
|
||||
// CustomStringFillFunc constructs a FillFunc which produces random strings.
|
||||
// Each character is selected from one of the ranges of ur(UnicodeRanges).
|
||||
// Each range has an equal probability of being chosen. If there are no ranges,
|
||||
// or a selected range has no characters (.Last < .First), this will panic.
|
||||
// Do not modify any of the ranges in ur after calling this function.
|
||||
func (ur UnicodeRanges) CustomStringFillFunc(n int) func(s *string, c Continue) { |
||||
// Check unicode ranges slice is empty.
|
||||
if len(ur) == 0 { |
||||
panic("UnicodeRanges is empty") |
||||
} |
||||
// if not empty, each range should be checked.
|
||||
for i := range ur { |
||||
ur[i].check() |
||||
} |
||||
return func(s *string, c Continue) { |
||||
*s = ur.randString(c.Rand, n) |
||||
} |
||||
} |
||||
|
||||
// randString of UnicodeRanges makes a random string up to 20 characters long.
|
||||
// Each character is selected form one of the ranges of ur(UnicodeRanges),
|
||||
// and each range has an equal probability of being chosen.
|
||||
func (ur UnicodeRanges) randString(r *rand.Rand, max int) string { |
||||
if max == 0 { |
||||
max = defaultStringMaxLen |
||||
} |
||||
n := r.Intn(max) |
||||
sb := strings.Builder{} |
||||
sb.Grow(n) |
||||
for i := 0; i < n; i++ { |
||||
sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) |
||||
} |
||||
return sb.String() |
||||
} |
||||
|
||||
// randString makes a random string up to max characters long (up to 20 when
// max is 0), drawn from defaultUnicodeRanges. The returned string may include
// a variety of (valid) UTF-8 encodings.
func randString(r *rand.Rand, max int) string {
	return defaultUnicodeRanges.randString(r, max)
}
||||
|
||||
// randUint64 makes random 64 bit numbers.
|
||||
// Weirdly, rand doesn't have a function that gives you 64 random bits.
|
||||
func randUint64(r *rand.Rand) uint64 { |
||||
return uint64(r.Uint32())<<32 | uint64(r.Uint32()) |
||||
} |
||||
Loading…
Reference in new issue