Signed-off-by: Paulo Dias <paulodias.gm@gmail.com>
pull/15539/head
parent ac92cf256e
commit fc0141aec2

@@ -0,0 +1,201 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package openstack

import (
	"context"
	"fmt"
	"log/slog"
	"net"
	"strconv"
	"strings"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners"
	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/discovery/targetgroup"
)

const (
	openstackLabelLoadBalancerID               = openstackLabelPrefix + "loadbalancer_id"
	openstackLabelLoadBalancerName             = openstackLabelPrefix + "loadbalancer_name"
	openstackLabelLoadBalancerStatus           = openstackLabelPrefix + "loadbalancer_status"
	openstackLabelLoadBalancerAvailabilityZone = openstackLabelPrefix + "loadbalancer_availability_zone"
	openstackLabelLoadBalancerFloatingIP       = openstackLabelPrefix + "loadbalancer_floating_ip"
	openstackLabelLoadBalancerVIP              = openstackLabelPrefix + "loadbalancer_vip"
	openstackLabelLoadBalancerProvider         = openstackLabelPrefix + "loadbalancer_provider"
	openstackLabelLoadBalancerTags             = openstackLabelPrefix + "loadbalancer_tags"
)

// LoadBalancerDiscovery discovers OpenStack load balancers.
type LoadBalancerDiscovery struct {
	provider     *gophercloud.ProviderClient
	authOpts     *gophercloud.AuthOptions
	region       string
	logger       *slog.Logger
	availability gophercloud.Availability
}

// newLoadBalancerDiscovery returns a new load balancer discovery.
func newLoadBalancerDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
	region string, availability gophercloud.Availability, l *slog.Logger,
) *LoadBalancerDiscovery {
	if l == nil {
		l = promslog.NewNopLogger()
	}
	return &LoadBalancerDiscovery{
		provider: provider, authOpts: opts,
		region: region, availability: availability, logger: l,
	}
}

func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	i.provider.Context = ctx
	err := openstack.Authenticate(i.provider, *i.authOpts)
	if err != nil {
		return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err)
	}

	client, err := openstack.NewLoadBalancerV2(i.provider, gophercloud.EndpointOpts{
		Region: i.region, Availability: i.availability,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create OpenStack load balancer session: %w", err)
	}

	networkClient, err := openstack.NewNetworkV2(i.provider, gophercloud.EndpointOpts{
		Region: i.region, Availability: i.availability,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create OpenStack network session: %w", err)
	}

	allPages, err := loadbalancers.List(client, loadbalancers.ListOpts{}).AllPages()
	if err != nil {
		return nil, fmt.Errorf("failed to list load balancers: %w", err)
	}

	allLBs, err := loadbalancers.ExtractLoadBalancers(allPages)
	if err != nil {
		return nil, fmt.Errorf("failed to extract load balancers: %w", err)
	}

	// Fetch all listeners in one API call.
	listenerPages, err := listeners.List(client, listeners.ListOpts{}).AllPages()
	if err != nil {
		return nil, fmt.Errorf("failed to list all listeners: %w", err)
	}

	allListeners, err := listeners.ExtractListeners(listenerPages)
	if err != nil {
		return nil, fmt.Errorf("failed to extract all listeners: %w", err)
	}

	// Create a map to group listeners by load balancer ID.
	listenerMap := make(map[string][]listeners.Listener)
	for _, listener := range allListeners {
		// Iterate through each associated load balancer ID in the Loadbalancers array.
		for _, lb := range listener.Loadbalancers {
			listenerMap[lb.ID] = append(listenerMap[lb.ID], listener)
		}
	}

	// Fetch all floating IPs with pagination.
	fipPages, err := floatingips.List(networkClient, floatingips.ListOpts{}).AllPages()
	if err != nil {
		return nil, fmt.Errorf("failed to list floating IPs: %w", err)
	}

	allFIPs, err := floatingips.ExtractFloatingIPs(fipPages)
	if err != nil {
		return nil, fmt.Errorf("failed to extract floating IPs: %w", err)
	}

	// Create a map to associate floating IPs with the ports they are attached to.
	fipMap := make(map[string]string) // Key: port ID, value: floating IP.
	for _, fip := range allFIPs {
		if fip.PortID != "" {
			fipMap[fip.PortID] = fip.FloatingIP
		}
	}

	tg := &targetgroup.Group{
		Source: "OS_" + i.region,
	}

	for _, lb := range allLBs {
		// Retrieve listeners for this load balancer from the map.
		lbListeners, exists := listenerMap[lb.ID]
		if !exists || len(lbListeners) == 0 {
			i.logger.Debug("Got no listener", "loadbalancer", lb.ID)
			continue
		}

		// Port of the first PROMETHEUS listener, if any.
		var listenerPort int
		hasPrometheusListener := false

		// Check if any listener uses the PROMETHEUS protocol.
		for _, listener := range lbListeners {
			if listener.Protocol == "PROMETHEUS" {
				hasPrometheusListener = true
				listenerPort = listener.ProtocolPort
				break
			}
		}

		// Skip load balancers without a PROMETHEUS listener.
		if !hasPrometheusListener {
			i.logger.Debug("Got no PROMETHEUS listener", "loadbalancer", lb.ID)
			continue
		}

		labels := model.LabelSet{}
		addr := net.JoinHostPort(lb.VipAddress, strconv.Itoa(listenerPort))
		labels[model.AddressLabel] = model.LabelValue(addr)
		labels[openstackLabelLoadBalancerID] = model.LabelValue(lb.ID)
		labels[openstackLabelLoadBalancerName] = model.LabelValue(lb.Name)
		labels[openstackLabelLoadBalancerStatus] = model.LabelValue(lb.ProvisioningStatus)
		labels[openstackLabelLoadBalancerAvailabilityZone] = model.LabelValue(lb.AvailabilityZone)
		labels[openstackLabelLoadBalancerVIP] = model.LabelValue(lb.VipAddress)
		labels[openstackLabelLoadBalancerProvider] = model.LabelValue(lb.Provider)
		labels[openstackLabelProjectID] = model.LabelValue(lb.ProjectID)

		if len(lb.Tags) > 0 {
			labels[openstackLabelLoadBalancerTags] = model.LabelValue(strings.Join(lb.Tags, ","))
		}

		if floatingIP, exists := fipMap[lb.VipPortID]; exists {
			labels[openstackLabelLoadBalancerFloatingIP] = model.LabelValue(floatingIP)
		}

		tg.Targets = append(tg.Targets, labels)
	}

	return []*targetgroup.Group{tg}, nil
}
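
For reference, here is a minimal sketch of how this discovery could be exercised directly. It is illustrative only: the function name, endpoint, and credentials are placeholders, and it assumes in-package access since newLoadBalancerDiscovery and refresh are unexported.

// exampleLoadBalancerRefresh is an illustrative sketch, not part of the change.
func exampleLoadBalancerRefresh() error {
	// Placeholder endpoint and credentials.
	opts := gophercloud.AuthOptions{
		IdentityEndpoint: "http://keystone.example:5000/v3",
		Username:         "monitoring",
		Password:         "secret",
		DomainName:       "Default",
	}
	provider, err := openstack.NewClient(opts.IdentityEndpoint)
	if err != nil {
		return err
	}
	d := newLoadBalancerDiscovery(provider, &opts, "RegionOne", gophercloud.AvailabilityPublic, nil)
	// refresh authenticates, queries Octavia and Neutron, and returns a single
	// target group (Source "OS_" + region) with one target per load balancer
	// that exposes a PROMETHEUS listener.
	tgs, err := d.refresh(context.Background())
	if err != nil {
		return err
	}
	for _, g := range tgs {
		fmt.Println(g.Source, len(g.Targets))
	}
	return nil
}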
@@ -0,0 +1,133 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package openstack

import (
	"context"
	"fmt"
	"testing"

	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"
)

type OpenstackSDLoadBalancerTestSuite struct {
	Mock *SDMock
}

func (s *OpenstackSDLoadBalancerTestSuite) SetupTest(t *testing.T) {
	s.Mock = NewSDMock(t)
	s.Mock.Setup()

	s.Mock.HandleLoadBalancerListSuccessfully()
	s.Mock.HandleListenersListSuccessfully()
	s.Mock.HandleFloatingIPsListSuccessfully()

	s.Mock.HandleVersionsSuccessfully()
	s.Mock.HandleAuthSuccessfully()
}

func (s *OpenstackSDLoadBalancerTestSuite) openstackAuthSuccess() (refresher, error) {
	conf := SDConfig{
		IdentityEndpoint: s.Mock.Endpoint(),
		Password:         "test",
		Username:         "test",
		DomainName:       "12345",
		Region:           "RegionOne",
		Role:             "loadbalancer",
	}
	return newRefresher(&conf, nil)
}

func TestOpenstackSDLoadBalancerRefresh(t *testing.T) {
	mock := &OpenstackSDLoadBalancerTestSuite{}
	mock.SetupTest(t)

	instance, err := mock.openstackAuthSuccess()
	require.NoError(t, err)

	ctx := context.Background()
	tgs, err := instance.refresh(ctx)

	require.NoError(t, err)
	require.Len(t, tgs, 1)

	tg := tgs[0]
	require.NotNil(t, tg)
	require.NotNil(t, tg.Targets)
	require.Len(t, tg.Targets, 4)

	for i, lbls := range []model.LabelSet{
		{
			"__address__": model.LabelValue("10.0.0.32:9273"),
			"__meta_openstack_loadbalancer_id": model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"),
			"__meta_openstack_loadbalancer_name": model.LabelValue("lb1"),
			"__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"),
			"__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"),
			"__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.1.2"),
			"__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.0.32"),
			"__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"),
			"__meta_openstack_loadbalancer_tags": model.LabelValue("tag1,tag2"),
			"__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"),
		},
		{
			"__address__": model.LabelValue("10.0.2.78:8080"),
			"__meta_openstack_loadbalancer_id": model.LabelValue("d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54"),
			"__meta_openstack_loadbalancer_name": model.LabelValue("lb3"),
			"__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"),
			"__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az3"),
			"__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.3.4"),
			"__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.2.78"),
			"__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"),
			"__meta_openstack_loadbalancer_tags": model.LabelValue("tag5,tag6"),
			"__meta_openstack_project_id": model.LabelValue("ac57f03dba1a4fdebff3e67201bc7a85"),
		},
		{
			"__address__": model.LabelValue("10.0.3.99:9090"),
			"__meta_openstack_loadbalancer_id": model.LabelValue("f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67"),
			"__meta_openstack_loadbalancer_name": model.LabelValue("lb4"),
			"__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"),
			"__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"),
			"__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.4.5"),
			"__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.3.99"),
			"__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"),
			"__meta_openstack_project_id": model.LabelValue("fa8c372dfe4d4c92b0c4e3a2d9b3c9fa"),
		},
		{
			"__address__": model.LabelValue("10.0.4.88:9876"),
			"__meta_openstack_loadbalancer_id": model.LabelValue("e83a6d92-7a3e-4567-94b3-20c83b32a75e"),
			"__meta_openstack_loadbalancer_name": model.LabelValue("lb5"),
			"__meta_openstack_loadbalancer_status": model.LabelValue("ACTIVE"),
			"__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az4"),
			"__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.4.88"),
			"__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"),
			"__meta_openstack_project_id": model.LabelValue("a5d3b2e1e6f34cd9a5f7c2f01a6b8e29"),
		},
	} {
		t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
			require.Equal(t, lbls, tg.Targets[i])
		})
	}
}

func TestOpenstackSDLoadBalancerRefreshWithDoneContext(t *testing.T) {
	mock := &OpenstackSDLoadBalancerTestSuite{}
	mock.SetupTest(t)

	loadbalancer, _ := mock.openstackAuthSuccess()
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	_, err := loadbalancer.refresh(ctx)
	require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
}