mirror of https://github.com/grafana/loki
feature: geoip stage in promtail (#3493)
Co-authored-by: Ed Welch <edward.welch@grafana.com>pull/8587/head
parent
272ebdec3c
commit
b94c8aaf16
@ -0,0 +1,241 @@ |
||||
package stages |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net" |
||||
"reflect" |
||||
"time" |
||||
|
||||
"github.com/go-kit/log" |
||||
"github.com/go-kit/log/level" |
||||
"github.com/mitchellh/mapstructure" |
||||
"github.com/oschwald/geoip2-golang" |
||||
"github.com/pkg/errors" |
||||
"github.com/prometheus/common/model" |
||||
) |
||||
|
||||
const (
	// ErrEmptyGeoIPStageConfig is returned when the geoip stage has no configuration at all.
	ErrEmptyGeoIPStageConfig = "geoip stage config cannot be empty"
	// ErrEmptyDBPathGeoIPStageConfig is returned when the path to the Maxmind DB file is missing.
	ErrEmptyDBPathGeoIPStageConfig = "db path cannot be empty"
	// ErrEmptySourceGeoIPStageConfig is returned when the source field name is set but empty.
	ErrEmptySourceGeoIPStageConfig = "source cannot be empty"
	// ErrEmptyDBTypeGeoIPStageConfig is returned when the db type is missing or unrecognized.
	ErrEmptyDBTypeGeoIPStageConfig = "db type should be either city or asn"
)
||||
|
||||
// GeoIPFields enumerates the GeoIP record attributes this stage can expose
// as labels (the corresponding label names live in the fields map below).
type GeoIPFields int

const (
	CITYNAME GeoIPFields = iota
	COUNTRYNAME
	CONTINENTNAME
	CONTINENTCODE
	LOCATION
	POSTALCODE
	TIMEZONE
	SUBDIVISIONNAME
	SUBDIVISIONCODE
)
||||
|
||||
// fields maps each GeoIP attribute to the label name it is exported under.
var fields = map[GeoIPFields]string{
	CITYNAME:    "geoip_city_name",
	COUNTRYNAME: "geoip_country_name",
	// NOTE(review): "continet" looks like a typo for "continent", but this
	// string is the stage's public label name (it also appears in the docs),
	// so renaming it would break users — confirm before fixing.
	CONTINENTNAME:   "geoip_continet_name",
	CONTINENTCODE:   "geoip_continent_code",
	LOCATION:        "geoip_location",
	POSTALCODE:      "geoip_postal_code",
	TIMEZONE:        "geoip_timezone",
	SUBDIVISIONNAME: "geoip_subdivision_name",
	SUBDIVISIONCODE: "geoip_subdivision_code",
}
||||
|
||||
// GeoIPConfig represents GeoIP stage config
type GeoIPConfig struct {
	// DB is the path to the Maxmind DB file (e.g. GeoIP2-City.mmdb).
	DB string `mapstructure:"db"`
	// Source is the name of the extracted value holding the IP to look up.
	Source *string `mapstructure:"source"`
	// DBType selects the record type to query: "city" or "asn".
	DBType string `mapstructure:"db_type"`
}
||||
|
||||
func validateGeoIPConfig(c *GeoIPConfig) error { |
||||
if c == nil { |
||||
return errors.New(ErrEmptyGeoIPStageConfig) |
||||
} |
||||
|
||||
if c.DB == "" { |
||||
return errors.New(ErrEmptyDBPathGeoIPStageConfig) |
||||
} |
||||
|
||||
if c.Source != nil && *c.Source == "" { |
||||
return errors.New(ErrEmptySourceGeoIPStageConfig) |
||||
} |
||||
|
||||
if c.DBType == "" { |
||||
return errors.New(ErrEmptyDBTypeGeoIPStageConfig) |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// newGeoIPStage decodes and validates the stage configuration, opens the
// Maxmind database at the configured path, and returns a ready-to-run
// geoip stage.
func newGeoIPStage(logger log.Logger, configs interface{}) (Stage, error) {
	cfgs := &GeoIPConfig{}
	err := mapstructure.Decode(configs, cfgs)
	if err != nil {
		return nil, err
	}

	err = validateGeoIPConfig(cfgs)
	if err != nil {
		return nil, err
	}

	db, err := geoip2.Open(cfgs.DB)
	if err != nil {
		return nil, err
	}

	return &geoIPStage{
		db:     db,
		logger: logger,
		cfgs:   cfgs,
	}, nil
}
||||
|
||||
// geoIPStage enriches log entries with geo attributes looked up in a
// Maxmind database.
type geoIPStage struct {
	logger log.Logger
	db     *geoip2.Reader // open database handle; closed when Run's input drains
	cfgs   *GeoIPConfig
}
||||
|
||||
// Run implements Stage
func (g *geoIPStage) Run(in chan Entry) chan Entry {
	out := make(chan Entry)
	go func() {
		defer close(out)
		// The database is only needed while entries are flowing; close it
		// once the input channel is drained.
		defer g.close()
		for e := range in {
			g.process(e.Labels, e.Extracted, &e.Timestamp, &e.Entry.Line)
			out <- e
		}
	}()
	return out
}
||||
|
||||
// Name implements Stage
func (g *geoIPStage) Name() string {
	return StageTypeGeoIP
}
||||
|
||||
func (g *geoIPStage) process(labels model.LabelSet, extracted map[string]interface{}, t *time.Time, entry *string) { |
||||
var ip net.IP |
||||
if g.cfgs.Source != nil { |
||||
if _, ok := extracted[*g.cfgs.Source]; !ok { |
||||
if Debug { |
||||
level.Debug(g.logger).Log("msg", "source does not exist in the set of extracted values", "source", *g.cfgs.Source) |
||||
} |
||||
return |
||||
} |
||||
|
||||
value, err := getString(extracted[*g.cfgs.Source]) |
||||
if err != nil { |
||||
if Debug { |
||||
level.Debug(g.logger).Log("msg", "failed to convert source value to string", "source", *g.cfgs.Source, "err", err, "type", reflect.TypeOf(extracted[*g.cfgs.Source])) |
||||
} |
||||
return |
||||
} |
||||
ip = net.ParseIP(value) |
||||
} |
||||
switch g.cfgs.DBType { |
||||
case "city": |
||||
record, err := g.db.City(ip) |
||||
if err != nil { |
||||
level.Error(g.logger).Log("msg", "unable to get City record for the ip", "err", err, "ip", ip) |
||||
return |
||||
} |
||||
g.populateLabelsWithCityData(labels, record) |
||||
case "asn": |
||||
record, err := g.db.ASN(ip) |
||||
if err != nil { |
||||
level.Error(g.logger).Log("msg", "unable to get ASN record for the ip", "err", err, "ip", ip) |
||||
return |
||||
} |
||||
g.populateLabelsWithASNData(labels, record) |
||||
default: |
||||
level.Error(g.logger).Log("msg", "unknown database type") |
||||
} |
||||
} |
||||
|
||||
// close releases the Maxmind database handle, logging (but not propagating)
// any error since there is no caller that could act on it.
func (g *geoIPStage) close() {
	if err := g.db.Close(); err != nil {
		level.Error(g.logger).Log("msg", "error while closing geoip db", "err", err)
	}
}
||||
|
||||
func (g *geoIPStage) populateLabelsWithCityData(labels model.LabelSet, record *geoip2.City) { |
||||
for field, label := range fields { |
||||
switch field { |
||||
case CITYNAME: |
||||
cityName := record.City.Names["en"] |
||||
if cityName != "" { |
||||
labels[model.LabelName(label)] = model.LabelValue(cityName) |
||||
} |
||||
case COUNTRYNAME: |
||||
contryName := record.Country.Names["en"] |
||||
if contryName != "" { |
||||
labels[model.LabelName(label)] = model.LabelValue(contryName) |
||||
} |
||||
case CONTINENTNAME: |
||||
continentName := record.Continent.Names["en"] |
||||
if continentName != "" { |
||||
labels[model.LabelName(label)] = model.LabelValue(continentName) |
||||
} |
||||
case CONTINENTCODE: |
||||
continentCode := record.Continent.Code |
||||
if continentCode != "" { |
||||
labels[model.LabelName(label)] = model.LabelValue(continentCode) |
||||
} |
||||
case POSTALCODE: |
||||
postalCode := record.Postal.Code |
||||
if postalCode != "" { |
||||
labels[model.LabelName(label)] = model.LabelValue(postalCode) |
||||
} |
||||
case TIMEZONE: |
||||
timezone := record.Location.TimeZone |
||||
if timezone != "" { |
||||
labels[model.LabelName(label)] = model.LabelValue(timezone) |
||||
} |
||||
case LOCATION: |
||||
latitude := record.Location.Latitude |
||||
longitude := record.Location.Longitude |
||||
if latitude != 0 || longitude != 0 { |
||||
labels[model.LabelName(fmt.Sprintf("%s_latitude", label))] = model.LabelValue(fmt.Sprint(latitude)) |
||||
labels[model.LabelName(fmt.Sprintf("%s_longitude", label))] = model.LabelValue(fmt.Sprint(longitude)) |
||||
} |
||||
case SUBDIVISIONNAME: |
||||
if len(record.Subdivisions) > 0 { |
||||
// we get most specific subdivision https://dev.maxmind.com/release-note/most-specific-subdivision-attribute-added/
|
||||
subdivisionName := record.Subdivisions[len(record.Subdivisions)-1].Names["en"] |
||||
if subdivisionName != "" { |
||||
labels[model.LabelName(label)] = model.LabelValue(subdivisionName) |
||||
} |
||||
} |
||||
case SUBDIVISIONCODE: |
||||
if len(record.Subdivisions) > 0 { |
||||
subdivisionCode := record.Subdivisions[len(record.Subdivisions)-1].IsoCode |
||||
if subdivisionCode != "" { |
||||
labels[model.LabelName(label)] = model.LabelValue(subdivisionCode) |
||||
} |
||||
} |
||||
default: |
||||
level.Error(g.logger).Log("msg", "unknown geoip field") |
||||
} |
||||
} |
||||
} |
||||
|
||||
func (g *geoIPStage) populateLabelsWithASNData(labels model.LabelSet, record *geoip2.ASN) { |
||||
autonomousSystemNumber := record.AutonomousSystemNumber |
||||
autonomousSystemOrganization := record.AutonomousSystemOrganization |
||||
if autonomousSystemNumber != 0 { |
||||
labels[model.LabelName("geoip_autonomous_system_number")] = model.LabelValue(fmt.Sprint(autonomousSystemNumber)) |
||||
} |
||||
if autonomousSystemOrganization != "" { |
||||
labels[model.LabelName("geoip_autonomous_system_organization")] = model.LabelValue(autonomousSystemOrganization) |
||||
} |
||||
} |
||||
@ -0,0 +1,55 @@ |
||||
package stages |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/pkg/errors" |
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
// Test_ValidateConfigs checks that validateGeoIPConfig accepts a complete
// configuration and rejects configurations with missing mandatory settings.
func Test_ValidateConfigs(t *testing.T) {
	source := "ip"
	tests := []struct {
		config    GeoIPConfig
		wantError error
	}{
		{
			GeoIPConfig{
				DB:     "test",
				Source: &source,
				DBType: "city",
			},
			nil,
		},
		{
			GeoIPConfig{
				Source: &source,
				DBType: "city",
			},
			errors.New(ErrEmptyDBPathGeoIPStageConfig),
		},
		{
			// NOTE(review): this case omits Source and expects
			// ErrEmptySourceGeoIPStageConfig, but validateGeoIPConfig only
			// errors when Source is non-nil AND empty — with a nil Source it
			// returns nil, and the lax assertions below never notice. Confirm
			// whether the validator should treat a nil Source as missing.
			GeoIPConfig{
				DB:     "test",
				DBType: "city",
			},
			errors.New(ErrEmptySourceGeoIPStageConfig),
		},
		{
			GeoIPConfig{
				DB:     "test",
				Source: &source,
			},
			errors.New(ErrEmptyDBTypeGeoIPStageConfig),
		},
	}
	for _, tt := range tests {
		err := validateGeoIPConfig(&tt.config)
		// NOTE(review): when wantError is non-nil but err is nil, neither
		// branch below asserts anything, so a missing error goes undetected.
		if err != nil {
			require.Equal(t, tt.wantError.Error(), err.Error())
		}
		if tt.wantError == nil {
			require.Nil(t, err)
		}
	}
}
||||
@ -0,0 +1,130 @@ |
||||
--- |
||||
title: geoip |
||||
description: geoip stage |
||||
--- |
||||
# geoip |
||||
|
||||
The `geoip` stage is a parsing stage that reads an ip address and |
||||
populates the label set with geoip fields. MaxMind's GeoIP2 database is used for the lookup.
||||
|
||||
Populated fields for City db: |
||||
|
||||
- geoip_city_name |
||||
- geoip_country_name |
||||
- geoip_continet_name |
||||
- geoip_continent_code |
||||
- geoip_location_latitude |
||||
- geoip_location_longitude |
||||
- geoip_postal_code |
||||
- geoip_timezone |
||||
- geoip_subdivision_name |
||||
- geoip_subdivision_code |
||||
|
||||
Populated fields for ASN (Autonomous System Number) db: |
||||
|
||||
- geoip_autonomous_system_number |
||||
- geoip_autonomous_system_organization |
||||
|
||||
## Schema |
||||
|
||||
```yaml |
||||
geoip: |
||||
# Path to the Maxmind DB file |
||||
[db: <string>] |
||||
|
||||
# IP from extracted data to parse. |
||||
[source: <string>] |
||||
|
||||
# Maxmind DB type. Allowed values are "city", "asn" |
||||
[db_type: <string>] |
||||
``` |
||||
|
||||
## GeoIP with City database example |
||||
|
||||
For the given pipeline |
||||
|
||||
```yaml |
||||
- regex: |
||||
expression: "^(?P<ip>\S+) .*" |
||||
- geoip: |
||||
db: "/path/to/GeoIP2-City.mmdb" |
||||
source: "ip" |
||||
db_type: "city" |
||||
``` |
||||
|
||||
And the log line: |
||||
|
||||
``` |
||||
"34.120.177.193 - "POST /loki/api/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6" |
||||
``` |
||||
|
||||
The `regex` stage parses the log line and `ip` is extracted. Then the extracted `ip` value is given as `source` to `geoip` stage. The `geoip` stage performs a lookup on the `ip` and populates the following labels: |
||||
|
||||
- `geoip_city_name`: `Kansas City` |
||||
- `geoip_country_name`: `United States` |
||||
- `geoip_continet_name`: `North America` |
||||
- `geoip_continent_code`: `NA` |
||||
- `geoip_location_latitude`: `39.1027`
||||
- `geoip_location_longitude`: `-94.5778` |
||||
- `geoip_postal_code`: `64184` |
||||
- `geoip_timezone`: `America/Chicago` |
||||
- `geoip_subdivision_name`: `Missouri` |
||||
- `geoip_subdivision_code`: `MO` |
||||
|
||||
If only a subset of these labels are required, you can chain the above pipeline with the `labeldrop` or `labelallow` stage. |
||||
|
||||
### labelallow example |
||||
|
||||
```yaml |
||||
- regex: |
||||
expression: "^(?P<ip>\S+) .*" |
||||
- geoip: |
||||
db: "/path/to/GeoCity.mmdb" |
||||
source: "ip" |
||||
db_type: "city" |
||||
- labelallow: |
||||
- geoip_city_name |
||||
- geoip_country_name |
||||
- geoip_location_latitude |
||||
- geoip_location_longitude |
||||
``` |
||||
|
||||
Only the labels listed under `labelallow` will be sent to Loki. |
||||
|
||||
### labeldrop example |
||||
|
||||
```yaml |
||||
- regex: |
||||
expression: "^(?P<ip>\S+) .*" |
||||
- geoip: |
||||
db: "/path/to/GeoCity.mmdb" |
||||
source: "ip" |
||||
db_type: "city" |
||||
- labeldrop: |
||||
- geoip_postal_code |
||||
- geoip_subdivision_code |
||||
``` |
||||
|
||||
All the labels except the ones listed under `labeldrop` will be sent to Loki. |
||||
|
||||
## GeoIP with ASN (Autonomous System Number) database example |
||||
|
||||
```yaml |
||||
- regex: |
||||
expression: "^(?P<ip>\S+) .*" |
||||
- geoip: |
||||
db: "/path/to/GeoIP2-ASN.mmdb" |
||||
source: "ip" |
||||
db_type: "asn" |
||||
``` |
||||
|
||||
And the log line: |
||||
|
||||
``` |
||||
"34.120.177.193 - "POST /loki/api/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6" |
||||
``` |
||||
|
||||
The `regex` stage parses the log line and `ip` is extracted. Then the extracted `ip` value is given as `source` to `geoip` stage. The `geoip` stage performs a lookup on the `ip` and populates the following labels: |
||||
|
||||
- `geoip_autonomous_system_number`: `396982` |
||||
- `geoip_autonomous_system_organization`: `GOOGLE-CLOUD-PLATFORM` |
||||
@ -0,0 +1,3 @@ |
||||
.vscode |
||||
*.out |
||||
*.test |
||||
@ -0,0 +1,3 @@ |
||||
[submodule "test-data"] |
||||
path = test-data |
||||
url = https://github.com/maxmind/MaxMind-DB.git |
||||
@ -0,0 +1,472 @@ |
||||
[run] |
||||
deadline = "10m" |
||||
|
||||
tests = true |
||||
|
||||
[linters] |
||||
disable-all = true |
||||
enable = [ |
||||
"asciicheck", |
||||
"bidichk", |
||||
"bodyclose", |
||||
"containedctx", |
||||
"contextcheck", |
||||
"deadcode", |
||||
"depguard", |
||||
"durationcheck", |
||||
"errcheck", |
||||
"errchkjson", |
||||
"errname", |
||||
"errorlint", |
||||
"exportloopref", |
||||
"forbidigo", |
||||
#"forcetypeassert", |
||||
"goconst", |
||||
"gocyclo", |
||||
"gocritic", |
||||
"godot", |
||||
"gofumpt", |
||||
"gomodguard", |
||||
"gosec", |
||||
"gosimple", |
||||
"govet", |
||||
"grouper", |
||||
"ineffassign", |
||||
"lll", |
||||
"makezero", |
||||
"maintidx", |
||||
"misspell", |
||||
"nakedret", |
||||
"nilerr", |
||||
"noctx", |
||||
"nolintlint", |
||||
"nosprintfhostport", |
||||
"predeclared", |
||||
"revive", |
||||
"rowserrcheck", |
||||
"sqlclosecheck", |
||||
"staticcheck", |
||||
"structcheck", |
||||
"stylecheck", |
||||
"tenv", |
||||
"tparallel", |
||||
"typecheck", |
||||
"unconvert", |
||||
"unparam", |
||||
"unused", |
||||
"varcheck", |
||||
"vetshadow", |
||||
"wastedassign", |
||||
] |
||||
|
||||
# Please note that we only use depguard for stdlib as gomodguard only |
||||
# supports modules currently. See https://github.com/ryancurrah/gomodguard/issues/12 |
||||
[linters-settings.depguard] |
||||
list-type = "blacklist" |
||||
include-go-root = true |
||||
packages = [ |
||||
# ioutil is deprecated. The functions have been moved elsewhere: |
||||
# https://golang.org/doc/go1.16#ioutil |
||||
"io/ioutil", |
||||
] |
||||
|
||||
[linters-settings.errcheck] |
||||
# Don't allow setting of error to the blank identifier. If there is a legitimate
||||
# reason, there should be a nolint with an explanation. |
||||
check-blank = true |
||||
|
||||
exclude-functions = [ |
||||
# If we are rolling back a transaction, we are often already in an error |
||||
# state. |
||||
'(*database/sql.Tx).Rollback', |
||||
|
||||
# It is reasonable to ignore errors if Cleanup fails in most cases. |
||||
'(*github.com/google/renameio/v2.PendingFile).Cleanup', |
||||
|
||||
# We often don't care if removing a file failed (e.g., it doesn't exist) |
||||
'os.Remove', |
||||
'os.RemoveAll', |
||||
] |
||||
|
||||
# Ignoring Close so that we don't have to have a bunch of |
||||
# `defer func() { _ = r.Close() }()` constructs when we |
||||
# don't actually care about the error. |
||||
ignore = "Close,fmt:.*" |
||||
|
||||
[linters-settings.errorlint] |
||||
errorf = true |
||||
asserts = true |
||||
comparison = true |
||||
|
||||
[linters-settings.exhaustive] |
||||
default-signifies-exhaustive = true |
||||
|
||||
[linters-settings.forbidigo] |
||||
# Forbid the following identifiers |
||||
forbid = [ |
||||
"^minFraud*", |
||||
"^maxMind*", |
||||
] |
||||
|
||||
[linters-settings.gocritic] |
||||
enabled-checks = [ |
||||
"appendAssign", |
||||
"appendCombine", |
||||
"argOrder", |
||||
"assignOp", |
||||
"badCall", |
||||
"badCond", |
||||
"badLock", |
||||
"badRegexp", |
||||
"badSorting", |
||||
"boolExprSimplify", |
||||
"builtinShadow", |
||||
"builtinShadowDecl", |
||||
"captLocal", |
||||
"caseOrder", |
||||
"codegenComment", |
||||
"commentedOutCode", |
||||
"commentedOutImport", |
||||
"commentFormatting", |
||||
"defaultCaseOrder", |
||||
# Revive's defer rule already captures this. This caught no extra cases. |
||||
# "deferInLoop", |
||||
"deferUnlambda", |
||||
"deprecatedComment", |
||||
"docStub", |
||||
"dupArg", |
||||
"dupBranchBody", |
||||
"dupCase", |
||||
"dupImport", |
||||
"dupSubExpr", |
||||
"dynamicFmtString", |
||||
"elseif", |
||||
"emptyDecl", |
||||
"emptyFallthrough", |
||||
"emptyStringTest", |
||||
"equalFold", |
||||
"evalOrder", |
||||
"exitAfterDefer", |
||||
"exposedSyncMutex", |
||||
"externalErrorReassign", |
||||
# Given that all of our code runs on Linux and the / separate should |
||||
# work fine, this seems less important. |
||||
# "filepathJoin", |
||||
"flagDeref", |
||||
"flagName", |
||||
"hexLiteral", |
||||
"ifElseChain", |
||||
"importShadow", |
||||
"indexAlloc", |
||||
"initClause", |
||||
"ioutilDeprecated", |
||||
"mapKey", |
||||
"methodExprCall", |
||||
"nestingReduce", |
||||
"newDeref", |
||||
"nilValReturn", |
||||
"octalLiteral", |
||||
"offBy1", |
||||
"paramTypeCombine", |
||||
"preferDecodeRune", |
||||
"preferFilepathJoin", |
||||
"preferFprint", |
||||
"preferStringWriter", |
||||
"preferWriteByte", |
||||
"ptrToRefParam", |
||||
"rangeExprCopy", |
||||
"rangeValCopy", |
||||
"redundantSprint", |
||||
"regexpMust", |
||||
"regexpPattern", |
||||
# This might be good, but I don't think we want to encourage |
||||
# significant changes to regexes as we port stuff from Perl. |
||||
# "regexpSimplify", |
||||
"ruleguard", |
||||
"singleCaseSwitch", |
||||
"sliceClear", |
||||
"sloppyLen", |
||||
# This seems like it might also be good, but a lot of existing code |
||||
# fails. |
||||
# "sloppyReassign", |
||||
"returnAfterHttpError", |
||||
"sloppyTypeAssert", |
||||
"sortSlice", |
||||
"sprintfQuotedString", |
||||
"sqlQuery", |
||||
"stringsCompare", |
||||
"stringXbytes", |
||||
"switchTrue", |
||||
"syncMapLoadAndDelete", |
||||
"timeExprSimplify", |
||||
"todoCommentWithoutDetail", |
||||
"tooManyResultsChecker", |
||||
"truncateCmp", |
||||
"typeAssertChain", |
||||
"typeDefFirst", |
||||
"typeSwitchVar", |
||||
"typeUnparen", |
||||
"underef", |
||||
"unlabelStmt", |
||||
"unlambda", |
||||
# I am not sure we would want this linter and a lot of existing |
||||
# code fails. |
||||
# "unnamedResult", |
||||
"unnecessaryBlock", |
||||
"unnecessaryDefer", |
||||
"unslice", |
||||
"valSwap", |
||||
"weakCond", |
||||
"wrapperFunc", |
||||
"yodaStyleExpr", |
||||
# This requires explanations for "nolint" directives. This would be |
||||
# nice for gosec ones, but I am not sure we want it generally unless |
||||
# we can get the false positive rate lower. |
||||
# "whyNoLint" |
||||
] |
||||
|
||||
[linters-settings.gofumpt] |
||||
extra-rules = true |
||||
lang-version = "1.18" |
||||
|
||||
[linters-settings.govet] |
||||
"enable-all" = true |
||||
|
||||
[linters-settings.lll] |
||||
line-length = 120 |
||||
tab-width = 4 |
||||
|
||||
[linters-settings.nolintlint] |
||||
allow-leading-space = false |
||||
allow-unused = false |
||||
allow-no-explanation = ["lll", "misspell"] |
||||
require-explanation = true |
||||
require-specific = true |
||||
|
||||
[linters-settings.revive] |
||||
ignore-generated-header = true |
||||
severity = "warning" |
||||
|
||||
# This might be nice but it is so common that it is hard |
||||
# to enable. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "add-constant" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "argument-limit" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "atomic" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "bare-return" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "blank-imports" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "bool-literal-in-expr" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "call-to-gc" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "cognitive-complexity" |
||||
|
||||
# Probably a good rule, but we have a lot of names that |
||||
# only have case differences. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "confusing-naming" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "confusing-results" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "constant-logical-expr" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "context-as-argument" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "context-keys-type" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "cyclomatic" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "deep-exit" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "defer" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "dot-imports" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "duplicated-imports" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "early-return" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "empty-block" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "empty-lines" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "errorf" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "error-naming" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "error-return" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "error-strings" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "exported" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "file-header" |
||||
|
||||
# We have a lot of flag parameters. This linter probably makes |
||||
# a good point, but we would need some cleanup or a lot of nolints. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "flag-parameter" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "function-result-limit" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "get-return" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "identical-branches" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "if-return" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "imports-blacklist" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "import-shadowing" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "increment-decrement" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "indent-error-flow" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "line-length-limit" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "max-public-structs" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "modifies-parameter" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "modifies-value-receiver" |
||||
|
||||
# We frequently use nested structs, particularly in tests. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "nested-structs" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "optimize-operands-order" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "package-comments" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "range" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "range-val-address" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "range-val-in-closure" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "receiver-naming" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "redefines-builtin-id" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "string-of-int" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "struct-tag" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "superfluous-else" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "time-naming" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unconditional-recursion" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unexported-naming" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unexported-return" |
||||
|
||||
# This is covered elsewhere and we want to ignore some |
||||
# functions such as fmt.Fprintf. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "unhandled-error" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unnecessary-stmt" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unreachable-code" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unused-parameter" |
||||
|
||||
# We generally have unused receivers in tests for meeting the |
||||
# requirements of an interface. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "unused-receiver" |
||||
|
||||
# This probably makes sense after we upgrade to 1.18 |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "use-any" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "useless-break" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "var-declaration" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "var-naming" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "waitgroup-by-value" |
||||
|
||||
[linters-settings.unparam] |
||||
check-exported = true |
||||
|
||||
[[issues.exclude-rules]] |
||||
linters = [ |
||||
"govet" |
||||
] |
||||
# we want to enable almost all govet rules. It is easier to just filter out |
||||
# the ones we don't want: |
||||
# |
||||
# * fieldalignment - way too noisy. Although it is very useful in particular |
||||
# cases where we are trying to use as little memory as possible, having |
||||
# it go off on every struct isn't helpful. |
||||
# * shadow - although often useful, it complains about _many_ err |
||||
# shadowing assignments and some others where shadowing is clear. |
||||
text = "^(fieldalignment|shadow)" |
||||
@ -0,0 +1,15 @@ |
||||
ISC License |
||||
|
||||
Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com> |
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any |
||||
purpose with or without fee is hereby granted, provided that the above |
||||
copyright notice and this permission notice appear in all copies. |
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH |
||||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY |
||||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, |
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM |
||||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR |
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
||||
PERFORMANCE OF THIS SOFTWARE. |
||||
@ -0,0 +1,93 @@ |
||||
# GeoIP2 Reader for Go # |
||||
|
||||
[](https://pkg.go.dev/github.com/oschwald/geoip2-golang) |
||||
|
||||
This library reads MaxMind [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) |
||||
and [GeoIP2](http://www.maxmind.com/en/geolocation_landing) databases. |
||||
|
||||
This library is built using |
||||
[the Go maxminddb reader](https://github.com/oschwald/maxminddb-golang). |
||||
All data for the database record is decoded using this library. If you only |
||||
need several fields, you may get superior performance by using maxminddb's |
||||
`Lookup` directly with a result struct that only contains the required fields. |
||||
(See [example_test.go](https://github.com/oschwald/maxminddb-golang/blob/main/example_test.go) |
||||
in the maxminddb repository for an example of this.) |
||||
|
||||
## Installation ## |
||||
|
||||
``` |
||||
go get github.com/oschwald/geoip2-golang |
||||
``` |
||||
|
||||
## Usage ## |
||||
|
||||
[See GoDoc](http://godoc.org/github.com/oschwald/geoip2-golang) for |
||||
documentation and examples. |
||||
|
||||
## Example ## |
||||
|
||||
```go |
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"log" |
||||
"net" |
||||
|
||||
"github.com/oschwald/geoip2-golang" |
||||
) |
||||
|
||||
func main() { |
||||
db, err := geoip2.Open("GeoIP2-City.mmdb") |
||||
if err != nil { |
||||
log.Fatal(err) |
||||
} |
||||
defer db.Close() |
||||
// If you are using strings that may be invalid, check that ip is not nil |
||||
ip := net.ParseIP("81.2.69.142") |
||||
record, err := db.City(ip) |
||||
if err != nil { |
||||
log.Fatal(err) |
||||
} |
||||
fmt.Printf("Portuguese (BR) city name: %v\n", record.City.Names["pt-BR"]) |
||||
if len(record.Subdivisions) > 0 { |
||||
fmt.Printf("English subdivision name: %v\n", record.Subdivisions[0].Names["en"]) |
||||
} |
||||
fmt.Printf("Russian country name: %v\n", record.Country.Names["ru"]) |
||||
fmt.Printf("ISO country code: %v\n", record.Country.IsoCode) |
||||
fmt.Printf("Time zone: %v\n", record.Location.TimeZone) |
||||
fmt.Printf("Coordinates: %v, %v\n", record.Location.Latitude, record.Location.Longitude) |
||||
// Output: |
||||
// Portuguese (BR) city name: Londres |
||||
// English subdivision name: England |
||||
// Russian country name: Великобритания |
||||
// ISO country code: GB |
||||
// Time zone: Europe/London |
||||
// Coordinates: 51.5142, -0.0931 |
||||
} |
||||
|
||||
``` |
||||
|
||||
## Testing ## |
||||
|
||||
Make sure you checked out test data submodule: |
||||
|
||||
``` |
||||
git submodule init |
||||
git submodule update |
||||
``` |
||||
|
||||
Execute test suite: |
||||
|
||||
``` |
||||
go test |
||||
``` |
||||
|
||||
## Contributing ## |
||||
|
||||
Contributions welcome! Please fork the repository and open a pull request |
||||
with your changes. |
||||
|
||||
## License ## |
||||
|
||||
This is free software, licensed under the ISC license. |
||||
@ -0,0 +1,418 @@ |
||||
// Package geoip2 provides an easy-to-use API for the MaxMind GeoIP2 and
|
||||
// GeoLite2 databases; this package does not support GeoIP Legacy databases.
|
||||
//
|
||||
// The structs provided by this package match the internal structure of
|
||||
// the data in the MaxMind databases.
|
||||
//
|
||||
// See github.com/oschwald/maxminddb-golang for more advanced used cases.
|
||||
package geoip2 |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net" |
||||
|
||||
"github.com/oschwald/maxminddb-golang" |
||||
) |
||||
|
||||
// The Enterprise struct corresponds to the data in the GeoIP2 Enterprise
// database. Each `maxminddb` tag names the key in the database record that
// the field is decoded from; Names maps are keyed by locale code (e.g. "en").
type Enterprise struct {
	City struct {
		Confidence uint8             `maxminddb:"confidence"`
		GeoNameID  uint              `maxminddb:"geoname_id"`
		Names      map[string]string `maxminddb:"names"`
	} `maxminddb:"city"`
	Continent struct {
		Code      string            `maxminddb:"code"`
		GeoNameID uint              `maxminddb:"geoname_id"`
		Names     map[string]string `maxminddb:"names"`
	} `maxminddb:"continent"`
	Country struct {
		GeoNameID         uint              `maxminddb:"geoname_id"`
		IsoCode           string            `maxminddb:"iso_code"`
		Names             map[string]string `maxminddb:"names"`
		Confidence        uint8             `maxminddb:"confidence"`
		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
	} `maxminddb:"country"`
	Location struct {
		AccuracyRadius uint16  `maxminddb:"accuracy_radius"`
		Latitude       float64 `maxminddb:"latitude"`
		Longitude      float64 `maxminddb:"longitude"`
		MetroCode      uint    `maxminddb:"metro_code"`
		TimeZone       string  `maxminddb:"time_zone"`
	} `maxminddb:"location"`
	Postal struct {
		Code       string `maxminddb:"code"`
		Confidence uint8  `maxminddb:"confidence"`
	} `maxminddb:"postal"`
	// RegisteredCountry is the country the IP block is registered to, which
	// may differ from Country (where the IP is geolocated).
	RegisteredCountry struct {
		GeoNameID         uint              `maxminddb:"geoname_id"`
		IsoCode           string            `maxminddb:"iso_code"`
		Names             map[string]string `maxminddb:"names"`
		Confidence        uint8             `maxminddb:"confidence"`
		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
	} `maxminddb:"registered_country"`
	RepresentedCountry struct {
		GeoNameID         uint              `maxminddb:"geoname_id"`
		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
		IsoCode           string            `maxminddb:"iso_code"`
		Names             map[string]string `maxminddb:"names"`
		Type              string            `maxminddb:"type"`
	} `maxminddb:"represented_country"`
	// Subdivisions are ordered from least to most specific
	// (e.g. state, then county) per the MaxMind record layout.
	Subdivisions []struct {
		Confidence uint8             `maxminddb:"confidence"`
		GeoNameID  uint              `maxminddb:"geoname_id"`
		IsoCode    string            `maxminddb:"iso_code"`
		Names      map[string]string `maxminddb:"names"`
	} `maxminddb:"subdivisions"`
	Traits struct {
		AutonomousSystemNumber       uint    `maxminddb:"autonomous_system_number"`
		AutonomousSystemOrganization string  `maxminddb:"autonomous_system_organization"`
		ConnectionType               string  `maxminddb:"connection_type"`
		Domain                       string  `maxminddb:"domain"`
		IsAnonymousProxy             bool    `maxminddb:"is_anonymous_proxy"`
		IsLegitimateProxy            bool    `maxminddb:"is_legitimate_proxy"`
		IsSatelliteProvider          bool    `maxminddb:"is_satellite_provider"`
		ISP                          string  `maxminddb:"isp"`
		MobileCountryCode            string  `maxminddb:"mobile_country_code"`
		MobileNetworkCode            string  `maxminddb:"mobile_network_code"`
		Organization                 string  `maxminddb:"organization"`
		StaticIPScore                float64 `maxminddb:"static_ip_score"`
		UserType                     string  `maxminddb:"user_type"`
	} `maxminddb:"traits"`
}
||||
|
||||
// The City struct corresponds to the data in the GeoIP2/GeoLite2 City
// databases. Each `maxminddb` tag names the database record key the field is
// decoded from; Names maps are keyed by locale code (e.g. "en", "pt-BR").
type City struct {
	City struct {
		GeoNameID uint              `maxminddb:"geoname_id"`
		Names     map[string]string `maxminddb:"names"`
	} `maxminddb:"city"`
	Continent struct {
		Code      string            `maxminddb:"code"`
		GeoNameID uint              `maxminddb:"geoname_id"`
		Names     map[string]string `maxminddb:"names"`
	} `maxminddb:"continent"`
	Country struct {
		GeoNameID         uint              `maxminddb:"geoname_id"`
		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
		IsoCode           string            `maxminddb:"iso_code"`
		Names             map[string]string `maxminddb:"names"`
	} `maxminddb:"country"`
	Location struct {
		AccuracyRadius uint16  `maxminddb:"accuracy_radius"`
		Latitude       float64 `maxminddb:"latitude"`
		Longitude      float64 `maxminddb:"longitude"`
		MetroCode      uint    `maxminddb:"metro_code"`
		TimeZone       string  `maxminddb:"time_zone"`
	} `maxminddb:"location"`
	Postal struct {
		Code string `maxminddb:"code"`
	} `maxminddb:"postal"`
	// RegisteredCountry is the country the IP block is registered to, which
	// may differ from Country (where the IP is geolocated).
	RegisteredCountry struct {
		GeoNameID         uint              `maxminddb:"geoname_id"`
		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
		IsoCode           string            `maxminddb:"iso_code"`
		Names             map[string]string `maxminddb:"names"`
	} `maxminddb:"registered_country"`
	RepresentedCountry struct {
		GeoNameID         uint              `maxminddb:"geoname_id"`
		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
		IsoCode           string            `maxminddb:"iso_code"`
		Names             map[string]string `maxminddb:"names"`
		Type              string            `maxminddb:"type"`
	} `maxminddb:"represented_country"`
	Subdivisions []struct {
		GeoNameID uint              `maxminddb:"geoname_id"`
		IsoCode   string            `maxminddb:"iso_code"`
		Names     map[string]string `maxminddb:"names"`
	} `maxminddb:"subdivisions"`
	Traits struct {
		IsAnonymousProxy    bool `maxminddb:"is_anonymous_proxy"`
		IsSatelliteProvider bool `maxminddb:"is_satellite_provider"`
	} `maxminddb:"traits"`
}
||||
|
||||
// The Country struct corresponds to the data in the GeoIP2/GeoLite2
// Country databases. Each `maxminddb` tag names the database record key the
// field is decoded from; Names maps are keyed by locale code (e.g. "en").
type Country struct {
	Continent struct {
		Code      string            `maxminddb:"code"`
		GeoNameID uint              `maxminddb:"geoname_id"`
		Names     map[string]string `maxminddb:"names"`
	} `maxminddb:"continent"`
	Country struct {
		GeoNameID         uint              `maxminddb:"geoname_id"`
		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
		IsoCode           string            `maxminddb:"iso_code"`
		Names             map[string]string `maxminddb:"names"`
	} `maxminddb:"country"`
	// RegisteredCountry is the country the IP block is registered to, which
	// may differ from Country (where the IP is geolocated).
	RegisteredCountry struct {
		GeoNameID         uint              `maxminddb:"geoname_id"`
		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
		IsoCode           string            `maxminddb:"iso_code"`
		Names             map[string]string `maxminddb:"names"`
	} `maxminddb:"registered_country"`
	RepresentedCountry struct {
		GeoNameID         uint              `maxminddb:"geoname_id"`
		IsInEuropeanUnion bool              `maxminddb:"is_in_european_union"`
		IsoCode           string            `maxminddb:"iso_code"`
		Names             map[string]string `maxminddb:"names"`
		Type              string            `maxminddb:"type"`
	} `maxminddb:"represented_country"`
	Traits struct {
		IsAnonymousProxy    bool `maxminddb:"is_anonymous_proxy"`
		IsSatelliteProvider bool `maxminddb:"is_satellite_provider"`
	} `maxminddb:"traits"`
}
||||
|
||||
// The AnonymousIP struct corresponds to the data in the GeoIP2
// Anonymous IP database. All fields are boolean flags decoded from the
// database record keys named in the `maxminddb` tags.
type AnonymousIP struct {
	IsAnonymous        bool `maxminddb:"is_anonymous"`
	IsAnonymousVPN     bool `maxminddb:"is_anonymous_vpn"`
	IsHostingProvider  bool `maxminddb:"is_hosting_provider"`
	IsPublicProxy      bool `maxminddb:"is_public_proxy"`
	IsResidentialProxy bool `maxminddb:"is_residential_proxy"`
	IsTorExitNode      bool `maxminddb:"is_tor_exit_node"`
}
||||
|
||||
// The ASN struct corresponds to the data in the GeoLite2 ASN database.
// The fields are decoded from the database record keys named in the
// `maxminddb` tags.
type ASN struct {
	AutonomousSystemNumber       uint   `maxminddb:"autonomous_system_number"`
	AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
}
||||
|
||||
// The ConnectionType struct corresponds to the data in the GeoIP2
// Connection-Type database. ConnectionType is decoded from the
// "connection_type" record key.
type ConnectionType struct {
	ConnectionType string `maxminddb:"connection_type"`
}
||||
|
||||
// The Domain struct corresponds to the data in the GeoIP2 Domain database.
// Domain is decoded from the "domain" record key.
type Domain struct {
	Domain string `maxminddb:"domain"`
}
||||
|
||||
// The ISP struct corresponds to the data in the GeoIP2 ISP database.
// The fields are decoded from the database record keys named in the
// `maxminddb` tags.
type ISP struct {
	AutonomousSystemNumber       uint   `maxminddb:"autonomous_system_number"`
	AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
	ISP                          string `maxminddb:"isp"`
	MobileCountryCode            string `maxminddb:"mobile_country_code"`
	MobileNetworkCode            string `maxminddb:"mobile_network_code"`
	Organization                 string `maxminddb:"organization"`
}
||||
|
||||
// databaseType is a bit set describing which lookup methods a database
// supports. A database may carry several flags (e.g. Enterprise databases
// also support City and Country lookups); see getDBType.
type databaseType int

const (
	// Each constant is a distinct bit (1 << iota), so flags can be combined
	// with | and tested with &.
	isAnonymousIP = 1 << iota
	isASN
	isCity
	isConnectionType
	isCountry
	isDomain
	isEnterprise
	isISP
)
||||
|
||||
// Reader holds the maxminddb.Reader struct. It can be created using the
// Open and FromBytes functions.
type Reader struct {
	mmdbReader *maxminddb.Reader // underlying low-level MaxMind DB reader
	// databaseType caches the capability flags derived from the database's
	// metadata so lookup methods can reject unsupported calls.
	databaseType databaseType
}
||||
|
||||
// InvalidMethodError is returned when a lookup method is called on a
// database that it does not support. For instance, calling the ISP method
// on a City database.
type InvalidMethodError struct {
	Method       string
	DatabaseType string
}

// Error implements the error interface, naming the unsupported method and
// the database type it was called on.
func (e InvalidMethodError) Error() string {
	return fmt.Sprintf(
		`geoip2: the %s method does not support the %s database`,
		e.Method,
		e.DatabaseType,
	)
}
||||
|
||||
// UnknownDatabaseTypeError is returned when an unknown database type is
// opened.
type UnknownDatabaseTypeError struct {
	DatabaseType string
}

// Error implements the error interface, quoting the unrecognized database
// type from the file's metadata.
func (e UnknownDatabaseTypeError) Error() string {
	return fmt.Sprintf(
		`geoip2: reader does not support the %q database type`,
		e.DatabaseType,
	)
}
||||
|
||||
// Open takes a string path to a file and returns a Reader struct or an error.
|
||||
// The database file is opened using a memory map. Use the Close method on the
|
||||
// Reader object to return the resources to the system.
|
||||
func Open(file string) (*Reader, error) { |
||||
reader, err := maxminddb.Open(file) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
dbType, err := getDBType(reader) |
||||
return &Reader{reader, dbType}, err |
||||
} |
||||
|
||||
// FromBytes takes a byte slice corresponding to a GeoIP2/GeoLite2 database
|
||||
// file and returns a Reader struct or an error. Note that the byte slice is
|
||||
// used directly; any modification of it after opening the database will result
|
||||
// in errors while reading from the database.
|
||||
func FromBytes(bytes []byte) (*Reader, error) { |
||||
reader, err := maxminddb.FromBytes(bytes) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
dbType, err := getDBType(reader) |
||||
return &Reader{reader, dbType}, err |
||||
} |
||||
|
||||
// getDBType maps the database-type string from the file's metadata to the
// capability bit set used by the lookup methods. Unknown type strings yield
// an UnknownDatabaseTypeError.
func getDBType(reader *maxminddb.Reader) (databaseType, error) {
	switch reader.Metadata.DatabaseType {
	case "GeoIP2-Anonymous-IP":
		return isAnonymousIP, nil
	case "DBIP-ASN-Lite (compat=GeoLite2-ASN)",
		"GeoLite2-ASN":
		return isASN, nil
	// We allow City lookups on Country for back compat
	case "DBIP-City-Lite",
		"DBIP-Country-Lite",
		"DBIP-Country",
		"DBIP-Location (compat=City)",
		"GeoLite2-City",
		"GeoIP2-City",
		"GeoIP2-City-Africa",
		"GeoIP2-City-Asia-Pacific",
		"GeoIP2-City-Europe",
		"GeoIP2-City-North-America",
		"GeoIP2-City-South-America",
		"GeoIP2-Precision-City",
		"GeoLite2-Country",
		"GeoIP2-Country":
		return isCity | isCountry, nil
	case "GeoIP2-Connection-Type":
		return isConnectionType, nil
	case "GeoIP2-Domain":
		return isDomain, nil
	case "DBIP-ISP (compat=Enterprise)",
		"DBIP-Location-ISP (compat=Enterprise)",
		"GeoIP2-Enterprise":
		// Enterprise databases also support the City and Country lookups.
		return isEnterprise | isCity | isCountry, nil
	case "GeoIP2-ISP",
		"GeoIP2-Precision-ISP":
		return isISP | isASN, nil
	default:
		return 0, UnknownDatabaseTypeError{reader.Metadata.DatabaseType}
	}
}
||||
|
||||
// Enterprise takes an IP address as a net.IP struct and returns an Enterprise
|
||||
// struct and/or an error. This is intended to be used with the GeoIP2
|
||||
// Enterprise database.
|
||||
func (r *Reader) Enterprise(ipAddress net.IP) (*Enterprise, error) { |
||||
if isEnterprise&r.databaseType == 0 { |
||||
return nil, InvalidMethodError{"Enterprise", r.Metadata().DatabaseType} |
||||
} |
||||
var enterprise Enterprise |
||||
err := r.mmdbReader.Lookup(ipAddress, &enterprise) |
||||
return &enterprise, err |
||||
} |
||||
|
||||
// City takes an IP address as a net.IP struct and returns a City struct
|
||||
// and/or an error. Although this can be used with other databases, this
|
||||
// method generally should be used with the GeoIP2 or GeoLite2 City databases.
|
||||
func (r *Reader) City(ipAddress net.IP) (*City, error) { |
||||
if isCity&r.databaseType == 0 { |
||||
return nil, InvalidMethodError{"City", r.Metadata().DatabaseType} |
||||
} |
||||
var city City |
||||
err := r.mmdbReader.Lookup(ipAddress, &city) |
||||
return &city, err |
||||
} |
||||
|
||||
// Country takes an IP address as a net.IP struct and returns a Country struct
|
||||
// and/or an error. Although this can be used with other databases, this
|
||||
// method generally should be used with the GeoIP2 or GeoLite2 Country
|
||||
// databases.
|
||||
func (r *Reader) Country(ipAddress net.IP) (*Country, error) { |
||||
if isCountry&r.databaseType == 0 { |
||||
return nil, InvalidMethodError{"Country", r.Metadata().DatabaseType} |
||||
} |
||||
var country Country |
||||
err := r.mmdbReader.Lookup(ipAddress, &country) |
||||
return &country, err |
||||
} |
||||
|
||||
// AnonymousIP takes an IP address as a net.IP struct and returns a
|
||||
// AnonymousIP struct and/or an error.
|
||||
func (r *Reader) AnonymousIP(ipAddress net.IP) (*AnonymousIP, error) { |
||||
if isAnonymousIP&r.databaseType == 0 { |
||||
return nil, InvalidMethodError{"AnonymousIP", r.Metadata().DatabaseType} |
||||
} |
||||
var anonIP AnonymousIP |
||||
err := r.mmdbReader.Lookup(ipAddress, &anonIP) |
||||
return &anonIP, err |
||||
} |
||||
|
||||
// ASN takes an IP address as a net.IP struct and returns a ASN struct and/or
|
||||
// an error.
|
||||
func (r *Reader) ASN(ipAddress net.IP) (*ASN, error) { |
||||
if isASN&r.databaseType == 0 { |
||||
return nil, InvalidMethodError{"ASN", r.Metadata().DatabaseType} |
||||
} |
||||
var val ASN |
||||
err := r.mmdbReader.Lookup(ipAddress, &val) |
||||
return &val, err |
||||
} |
||||
|
||||
// ConnectionType takes an IP address as a net.IP struct and returns a
|
||||
// ConnectionType struct and/or an error.
|
||||
func (r *Reader) ConnectionType(ipAddress net.IP) (*ConnectionType, error) { |
||||
if isConnectionType&r.databaseType == 0 { |
||||
return nil, InvalidMethodError{"ConnectionType", r.Metadata().DatabaseType} |
||||
} |
||||
var val ConnectionType |
||||
err := r.mmdbReader.Lookup(ipAddress, &val) |
||||
return &val, err |
||||
} |
||||
|
||||
// Domain takes an IP address as a net.IP struct and returns a
|
||||
// Domain struct and/or an error.
|
||||
func (r *Reader) Domain(ipAddress net.IP) (*Domain, error) { |
||||
if isDomain&r.databaseType == 0 { |
||||
return nil, InvalidMethodError{"Domain", r.Metadata().DatabaseType} |
||||
} |
||||
var val Domain |
||||
err := r.mmdbReader.Lookup(ipAddress, &val) |
||||
return &val, err |
||||
} |
||||
|
||||
// ISP takes an IP address as a net.IP struct and returns a ISP struct and/or
|
||||
// an error.
|
||||
func (r *Reader) ISP(ipAddress net.IP) (*ISP, error) { |
||||
if isISP&r.databaseType == 0 { |
||||
return nil, InvalidMethodError{"ISP", r.Metadata().DatabaseType} |
||||
} |
||||
var val ISP |
||||
err := r.mmdbReader.Lookup(ipAddress, &val) |
||||
return &val, err |
||||
} |
||||
|
||||
// Metadata takes no arguments and returns a struct containing metadata about
// the MaxMind database in use by the Reader. The metadata includes, among
// other fields, the DatabaseType string used by the lookup-method guards.
func (r *Reader) Metadata() maxminddb.Metadata {
	return r.mmdbReader.Metadata
}
||||
|
||||
// Close unmaps the database file from virtual memory and returns the
// resources to the system. The Reader must not be used after Close.
func (r *Reader) Close() error {
	return r.mmdbReader.Close()
}
||||
@ -0,0 +1,4 @@ |
||||
.vscode |
||||
*.out |
||||
*.sw? |
||||
*.test |
||||
@ -0,0 +1,3 @@ |
||||
[submodule "test-data"] |
||||
path = test-data |
||||
url = https://github.com/maxmind/MaxMind-DB.git |
||||
@ -0,0 +1,472 @@ |
||||
[run] |
||||
deadline = "10m" |
||||
|
||||
tests = true |
||||
|
||||
[linters] |
||||
disable-all = true |
||||
enable = [ |
||||
"asciicheck", |
||||
"bidichk", |
||||
"bodyclose", |
||||
"containedctx", |
||||
"contextcheck", |
||||
"deadcode", |
||||
"depguard", |
||||
"durationcheck", |
||||
"errcheck", |
||||
"errchkjson", |
||||
"errname", |
||||
"errorlint", |
||||
"exportloopref", |
||||
"forbidigo", |
||||
#"forcetypeassert", |
||||
"goconst", |
||||
"gocyclo", |
||||
"gocritic", |
||||
"godot", |
||||
"gofumpt", |
||||
"gomodguard", |
||||
"gosec", |
||||
"gosimple", |
||||
"govet", |
||||
"grouper", |
||||
"ineffassign", |
||||
"lll", |
||||
"makezero", |
||||
"maintidx", |
||||
"misspell", |
||||
"nakedret", |
||||
"nilerr", |
||||
"noctx", |
||||
"nolintlint", |
||||
"nosprintfhostport", |
||||
"predeclared", |
||||
"revive", |
||||
"rowserrcheck", |
||||
"sqlclosecheck", |
||||
"staticcheck", |
||||
"structcheck", |
||||
"stylecheck", |
||||
"tenv", |
||||
"tparallel", |
||||
"typecheck", |
||||
"unconvert", |
||||
"unparam", |
||||
"unused", |
||||
"varcheck", |
||||
"vetshadow", |
||||
"wastedassign", |
||||
] |
||||
|
||||
# Please note that we only use depguard for stdlib as gomodguard only |
||||
# supports modules currently. See https://github.com/ryancurrah/gomodguard/issues/12 |
||||
[linters-settings.depguard] |
||||
list-type = "blacklist" |
||||
include-go-root = true |
||||
packages = [ |
||||
# ioutil is deprecated. The functions have been moved elsewhere: |
||||
# https://golang.org/doc/go1.16#ioutil |
||||
"io/ioutil", |
||||
] |
||||
|
||||
[linters-settings.errcheck] |
||||
# Don't allow setting of error to the blank identifier. If there is a legitimate
# reason, there should be a nolint with an explanation.
||||
check-blank = true |
||||
|
||||
exclude-functions = [ |
||||
# If we are rolling back a transaction, we are often already in an error |
||||
# state. |
||||
'(*database/sql.Tx).Rollback', |
||||
|
||||
# It is reasonable to ignore errors if Cleanup fails in most cases. |
||||
'(*github.com/google/renameio/v2.PendingFile).Cleanup', |
||||
|
||||
# We often don't care if removing a file failed (e.g., it doesn't exist) |
||||
'os.Remove', |
||||
'os.RemoveAll', |
||||
] |
||||
|
||||
# Ignoring Close so that we don't have to have a bunch of |
||||
# `defer func() { _ = r.Close() }()` constructs when we |
||||
# don't actually care about the error. |
||||
ignore = "Close,fmt:.*" |
||||
|
||||
[linters-settings.errorlint] |
||||
errorf = true |
||||
asserts = true |
||||
comparison = true |
||||
|
||||
[linters-settings.exhaustive] |
||||
default-signifies-exhaustive = true |
||||
|
||||
[linters-settings.forbidigo] |
||||
# Forbid the following identifiers |
||||
forbid = [ |
||||
"^minFraud*", |
||||
"^maxMind*", |
||||
] |
||||
|
||||
[linters-settings.gocritic] |
||||
enabled-checks = [ |
||||
"appendAssign", |
||||
"appendCombine", |
||||
"argOrder", |
||||
"assignOp", |
||||
"badCall", |
||||
"badCond", |
||||
"badLock", |
||||
"badRegexp", |
||||
"badSorting", |
||||
"boolExprSimplify", |
||||
"builtinShadow", |
||||
"builtinShadowDecl", |
||||
"captLocal", |
||||
"caseOrder", |
||||
"codegenComment", |
||||
"commentedOutCode", |
||||
"commentedOutImport", |
||||
"commentFormatting", |
||||
"defaultCaseOrder", |
||||
# Revive's defer rule already captures this. This caught no extra cases. |
||||
# "deferInLoop", |
||||
"deferUnlambda", |
||||
"deprecatedComment", |
||||
"docStub", |
||||
"dupArg", |
||||
"dupBranchBody", |
||||
"dupCase", |
||||
"dupImport", |
||||
"dupSubExpr", |
||||
"dynamicFmtString", |
||||
"elseif", |
||||
"emptyDecl", |
||||
"emptyFallthrough", |
||||
"emptyStringTest", |
||||
"equalFold", |
||||
"evalOrder", |
||||
"exitAfterDefer", |
||||
"exposedSyncMutex", |
||||
"externalErrorReassign", |
||||
# Given that all of our code runs on Linux and the / separator should
# work fine, this seems less important.
||||
# "filepathJoin", |
||||
"flagDeref", |
||||
"flagName", |
||||
"hexLiteral", |
||||
"ifElseChain", |
||||
"importShadow", |
||||
"indexAlloc", |
||||
"initClause", |
||||
"ioutilDeprecated", |
||||
"mapKey", |
||||
"methodExprCall", |
||||
"nestingReduce", |
||||
"newDeref", |
||||
"nilValReturn", |
||||
"octalLiteral", |
||||
"offBy1", |
||||
"paramTypeCombine", |
||||
"preferDecodeRune", |
||||
"preferFilepathJoin", |
||||
"preferFprint", |
||||
"preferStringWriter", |
||||
"preferWriteByte", |
||||
"ptrToRefParam", |
||||
"rangeExprCopy", |
||||
"rangeValCopy", |
||||
"redundantSprint", |
||||
"regexpMust", |
||||
"regexpPattern", |
||||
# This might be good, but I don't think we want to encourage |
||||
# significant changes to regexes as we port stuff from Perl. |
||||
# "regexpSimplify", |
||||
"ruleguard", |
||||
"singleCaseSwitch", |
||||
"sliceClear", |
||||
"sloppyLen", |
||||
# This seems like it might also be good, but a lot of existing code |
||||
# fails. |
||||
# "sloppyReassign", |
||||
"returnAfterHttpError", |
||||
"sloppyTypeAssert", |
||||
"sortSlice", |
||||
"sprintfQuotedString", |
||||
"sqlQuery", |
||||
"stringsCompare", |
||||
"stringXbytes", |
||||
"switchTrue", |
||||
"syncMapLoadAndDelete", |
||||
"timeExprSimplify", |
||||
"todoCommentWithoutDetail", |
||||
"tooManyResultsChecker", |
||||
"truncateCmp", |
||||
"typeAssertChain", |
||||
"typeDefFirst", |
||||
"typeSwitchVar", |
||||
"typeUnparen", |
||||
"underef", |
||||
"unlabelStmt", |
||||
"unlambda", |
||||
# I am not sure we would want this linter and a lot of existing |
||||
# code fails. |
||||
# "unnamedResult", |
||||
"unnecessaryBlock", |
||||
"unnecessaryDefer", |
||||
"unslice", |
||||
"valSwap", |
||||
"weakCond", |
||||
"wrapperFunc", |
||||
"yodaStyleExpr", |
||||
# This requires explanations for "nolint" directives. This would be |
||||
# nice for gosec ones, but I am not sure we want it generally unless |
||||
# we can get the false positive rate lower. |
||||
# "whyNoLint" |
||||
] |
||||
|
||||
[linters-settings.gofumpt] |
||||
extra-rules = true |
||||
lang-version = "1.18" |
||||
|
||||
[linters-settings.govet] |
||||
"enable-all" = true |
||||
|
||||
[linters-settings.lll] |
||||
line-length = 120 |
||||
tab-width = 4 |
||||
|
||||
[linters-settings.nolintlint] |
||||
allow-leading-space = false |
||||
allow-unused = false |
||||
allow-no-explanation = ["lll", "misspell"] |
||||
require-explanation = true |
||||
require-specific = true |
||||
|
||||
[linters-settings.revive] |
||||
ignore-generated-header = true |
||||
severity = "warning" |
||||
|
||||
# This might be nice but it is so common that it is hard |
||||
# to enable. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "add-constant" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "argument-limit" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "atomic" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "bare-return" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "blank-imports" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "bool-literal-in-expr" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "call-to-gc" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "cognitive-complexity" |
||||
|
||||
# Probably a good rule, but we have a lot of names that |
||||
# only have case differences. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "confusing-naming" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "confusing-results" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "constant-logical-expr" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "context-as-argument" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "context-keys-type" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "cyclomatic" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "deep-exit" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "defer" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "dot-imports" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "duplicated-imports" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "early-return" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "empty-block" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "empty-lines" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "errorf" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "error-naming" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "error-return" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "error-strings" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "exported" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "file-header" |
||||
|
||||
# We have a lot of flag parameters. This linter probably makes |
||||
# a good point, but we would need some cleanup or a lot of nolints. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "flag-parameter" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "function-result-limit" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "get-return" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "identical-branches" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "if-return" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "imports-blacklist" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "import-shadowing" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "increment-decrement" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "indent-error-flow" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "line-length-limit" |
||||
|
||||
# [[linters-settings.revive.rules]] |
||||
# name = "max-public-structs" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "modifies-parameter" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "modifies-value-receiver" |
||||
|
||||
# We frequently use nested structs, particularly in tests. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "nested-structs" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "optimize-operands-order" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "package-comments" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "range" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "range-val-address" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "range-val-in-closure" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "receiver-naming" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "redefines-builtin-id" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "string-of-int" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "struct-tag" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "superfluous-else" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "time-naming" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unconditional-recursion" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unexported-naming" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unexported-return" |
||||
|
||||
# This is covered elsewhere and we want to ignore some |
||||
# functions such as fmt.Fprintf. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "unhandled-error" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unnecessary-stmt" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unreachable-code" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "unused-parameter" |
||||
|
||||
# We generally have unused receivers in tests for meeting the |
||||
# requirements of an interface. |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "unused-receiver" |
||||
|
||||
# This probably makes sense after we upgrade to 1.18 |
||||
# [[linters-settings.revive.rules]] |
||||
# name = "use-any" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "useless-break" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "var-declaration" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "var-naming" |
||||
|
||||
[[linters-settings.revive.rules]] |
||||
name = "waitgroup-by-value" |
||||
|
||||
[linters-settings.unparam] |
||||
check-exported = true |
||||
|
||||
[[issues.exclude-rules]] |
||||
linters = [ |
||||
"govet" |
||||
] |
||||
# we want to enable almost all govet rules. It is easier to just filter out |
||||
# the ones we don't want: |
||||
# |
||||
# * fieldalignment - way too noisy. Although it is very useful in particular |
||||
# cases where we are trying to use as little memory as possible, having |
||||
# it go off on every struct isn't helpful. |
||||
# * shadow - although often useful, it complains about _many_ err |
||||
# shadowing assignments and some others where shadowing is clear. |
||||
text = "^(fieldalignment|shadow)" |
||||
@ -0,0 +1,15 @@ |
||||
ISC License |
||||
|
||||
Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com> |
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any |
||||
purpose with or without fee is hereby granted, provided that the above |
||||
copyright notice and this permission notice appear in all copies. |
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH |
||||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY |
||||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, |
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM |
||||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR |
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
||||
PERFORMANCE OF THIS SOFTWARE. |
||||
@ -0,0 +1,36 @@ |
||||
# MaxMind DB Reader for Go # |
||||
|
||||
[](https://godoc.org/github.com/oschwald/maxminddb-golang) |
||||
|
||||
This is a Go reader for the MaxMind DB format. Although this can be used to |
||||
read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and |
||||
[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases, |
||||
[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level |
||||
API for doing so. |
||||
|
||||
This is not an official MaxMind API. |
||||
|
||||
## Installation ## |
||||
|
||||
``` |
||||
go get github.com/oschwald/maxminddb-golang |
||||
``` |
||||
|
||||
## Usage ## |
||||
|
||||
[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for |
||||
documentation and examples. |
||||
|
||||
## Examples ## |
||||
|
||||
See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or |
||||
`example_test.go` for examples. |
||||
|
||||
## Contributing ## |
||||
|
||||
Contributions welcome! Please fork the repository and open a pull request |
||||
with your changes. |
||||
|
||||
## License ## |
||||
|
||||
This is free software, licensed under the ISC License. |
||||
@ -0,0 +1,897 @@ |
||||
package maxminddb |
||||
|
||||
import ( |
||||
"encoding/binary" |
||||
"math" |
||||
"math/big" |
||||
"reflect" |
||||
"sync" |
||||
) |
||||
|
||||
// decoder decodes values out of a MaxMind DB. buffer holds the raw database
// bytes; the uint offsets passed to its methods index into this slice.
type decoder struct {
	buffer []byte
}
||||
|
||||
// dataType identifies the wire type of an encoded value, per the MaxMind DB
// format specification. The iota order matches the spec's type numbers.
type dataType int

const (
	_Extended dataType = iota
	_Pointer
	_String
	_Float64
	_Bytes
	_Uint16
	_Uint32
	_Map
	_Int32
	_Uint64
	_Uint128
	_Slice
	// We don't use the next two. They are placeholders. See the spec
	// for more details.
	_Container //nolint: deadcode, varcheck // above
	_Marker    //nolint: deadcode, varcheck // above
	_Bool
	_Float32
)

const (
	// This is the value used in libmaxminddb.
	maximumDataStructureDepth = 512
)
||||
|
||||
func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) { |
||||
if depth > maximumDataStructureDepth { |
||||
return 0, newInvalidDatabaseError( |
||||
"exceeded maximum data structure depth; database is likely corrupt", |
||||
) |
||||
} |
||||
typeNum, size, newOffset, err := d.decodeCtrlData(offset) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
|
||||
if typeNum != _Pointer && result.Kind() == reflect.Uintptr { |
||||
result.Set(reflect.ValueOf(uintptr(offset))) |
||||
return d.nextValueOffset(offset, 1) |
||||
} |
||||
return d.decodeFromType(typeNum, size, newOffset, result, depth+1) |
||||
} |
||||
|
||||
func (d *decoder) decodeToDeserializer( |
||||
offset uint, |
||||
dser deserializer, |
||||
depth int, |
||||
getNext bool, |
||||
) (uint, error) { |
||||
if depth > maximumDataStructureDepth { |
||||
return 0, newInvalidDatabaseError( |
||||
"exceeded maximum data structure depth; database is likely corrupt", |
||||
) |
||||
} |
||||
skip, err := dser.ShouldSkip(uintptr(offset)) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
if skip { |
||||
if getNext { |
||||
return d.nextValueOffset(offset, 1) |
||||
} |
||||
return 0, nil |
||||
} |
||||
|
||||
typeNum, size, newOffset, err := d.decodeCtrlData(offset) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
|
||||
return d.decodeFromTypeToDeserializer(typeNum, size, newOffset, dser, depth+1) |
||||
} |
||||
|
||||
func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) { |
||||
newOffset := offset + 1 |
||||
if offset >= uint(len(d.buffer)) { |
||||
return 0, 0, 0, newOffsetError() |
||||
} |
||||
ctrlByte := d.buffer[offset] |
||||
|
||||
typeNum := dataType(ctrlByte >> 5) |
||||
if typeNum == _Extended { |
||||
if newOffset >= uint(len(d.buffer)) { |
||||
return 0, 0, 0, newOffsetError() |
||||
} |
||||
typeNum = dataType(d.buffer[newOffset] + 7) |
||||
newOffset++ |
||||
} |
||||
|
||||
var size uint |
||||
size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum) |
||||
return typeNum, size, newOffset, err |
||||
} |
||||
|
||||
func (d *decoder) sizeFromCtrlByte( |
||||
ctrlByte byte, |
||||
offset uint, |
||||
typeNum dataType, |
||||
) (uint, uint, error) { |
||||
size := uint(ctrlByte & 0x1f) |
||||
if typeNum == _Extended { |
||||
return size, offset, nil |
||||
} |
||||
|
||||
var bytesToRead uint |
||||
if size < 29 { |
||||
return size, offset, nil |
||||
} |
||||
|
||||
bytesToRead = size - 28 |
||||
newOffset := offset + bytesToRead |
||||
if newOffset > uint(len(d.buffer)) { |
||||
return 0, 0, newOffsetError() |
||||
} |
||||
if size == 29 { |
||||
return 29 + uint(d.buffer[offset]), offset + 1, nil |
||||
} |
||||
|
||||
sizeBytes := d.buffer[offset:newOffset] |
||||
|
||||
switch { |
||||
case size == 30: |
||||
size = 285 + uintFromBytes(0, sizeBytes) |
||||
case size > 30: |
||||
size = uintFromBytes(0, sizeBytes) + 65821 |
||||
} |
||||
return size, newOffset, nil |
||||
} |
||||
|
||||
// decodeFromType dispatches on the record's data type, decoding the
// value at offset into result and returning the offset of the value
// that follows.
func (d *decoder) decodeFromType(
	dtype dataType,
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	result = d.indirect(result)

	// For these types, size has a special meaning
	switch dtype {
	case _Bool:
		return d.unmarshalBool(size, offset, result)
	case _Map:
		return d.unmarshalMap(size, offset, result, depth)
	case _Pointer:
		return d.unmarshalPointer(size, offset, result, depth)
	case _Slice:
		return d.unmarshalSlice(size, offset, result, depth)
	}

	// For the remaining types, size is the byte size
	if offset+size > uint(len(d.buffer)) {
		return 0, newOffsetError()
	}
	switch dtype {
	case _Bytes:
		return d.unmarshalBytes(size, offset, result)
	case _Float32:
		return d.unmarshalFloat32(size, offset, result)
	case _Float64:
		return d.unmarshalFloat64(size, offset, result)
	case _Int32:
		return d.unmarshalInt32(size, offset, result)
	case _String:
		return d.unmarshalString(size, offset, result)
	case _Uint16:
		return d.unmarshalUint(size, offset, result, 16)
	case _Uint32:
		return d.unmarshalUint(size, offset, result, 32)
	case _Uint64:
		return d.unmarshalUint(size, offset, result, 64)
	case _Uint128:
		return d.unmarshalUint128(size, offset, result)
	default:
		return 0, newInvalidDatabaseError("unknown type: %d", dtype)
	}
}
||||
|
||||
// decodeFromTypeToDeserializer dispatches on the record's data type,
// streaming the value at offset into dser and returning the offset of
// the value that follows.
func (d *decoder) decodeFromTypeToDeserializer(
	dtype dataType,
	size uint,
	offset uint,
	dser deserializer,
	depth int,
) (uint, error) {
	// For these types, size has a special meaning
	switch dtype {
	case _Bool:
		v, offset := d.decodeBool(size, offset)
		return offset, dser.Bool(v)
	case _Map:
		return d.decodeMapToDeserializer(size, offset, dser, depth)
	case _Pointer:
		// The pointer's target is streamed, but the offset returned is
		// the one following the pointer record itself.
		pointer, newOffset, err := d.decodePointer(size, offset)
		if err != nil {
			return 0, err
		}
		_, err = d.decodeToDeserializer(pointer, dser, depth, false)
		return newOffset, err
	case _Slice:
		return d.decodeSliceToDeserializer(size, offset, dser, depth)
	}

	// For the remaining types, size is the byte size
	if offset+size > uint(len(d.buffer)) {
		return 0, newOffsetError()
	}
	switch dtype {
	case _Bytes:
		v, offset := d.decodeBytes(size, offset)
		return offset, dser.Bytes(v)
	case _Float32:
		v, offset := d.decodeFloat32(size, offset)
		return offset, dser.Float32(v)
	case _Float64:
		v, offset := d.decodeFloat64(size, offset)
		return offset, dser.Float64(v)
	case _Int32:
		v, offset := d.decodeInt(size, offset)
		return offset, dser.Int32(int32(v))
	case _String:
		v, offset := d.decodeString(size, offset)
		return offset, dser.String(v)
	case _Uint16:
		v, offset := d.decodeUint(size, offset)
		return offset, dser.Uint16(uint16(v))
	case _Uint32:
		v, offset := d.decodeUint(size, offset)
		return offset, dser.Uint32(uint32(v))
	case _Uint64:
		v, offset := d.decodeUint(size, offset)
		return offset, dser.Uint64(v)
	case _Uint128:
		v, offset := d.decodeUint128(size, offset)
		return offset, dser.Uint128(v)
	default:
		return 0, newInvalidDatabaseError("unknown type: %d", dtype)
	}
}
||||
|
||||
func (d *decoder) unmarshalBool(size, offset uint, result reflect.Value) (uint, error) { |
||||
if size > 1 { |
||||
return 0, newInvalidDatabaseError( |
||||
"the MaxMind DB file's data section contains bad data (bool size of %v)", |
||||
size, |
||||
) |
||||
} |
||||
value, newOffset := d.decodeBool(size, offset) |
||||
|
||||
switch result.Kind() { |
||||
case reflect.Bool: |
||||
result.SetBool(value) |
||||
return newOffset, nil |
||||
case reflect.Interface: |
||||
if result.NumMethod() == 0 { |
||||
result.Set(reflect.ValueOf(value)) |
||||
return newOffset, nil |
||||
} |
||||
} |
||||
return newOffset, newUnmarshalTypeError(value, result.Type()) |
||||
} |
||||
|
||||
// indirect follows pointers and create values as necessary. This is
// heavily based on encoding/json as my original version had a subtle
// bug. This method should be considered to be licensed under
// https://golang.org/LICENSE
func (d *decoder) indirect(result reflect.Value) reflect.Value {
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if result.Kind() == reflect.Interface && !result.IsNil() {
			e := result.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() {
				result = e
				continue
			}
		}

		if result.Kind() != reflect.Ptr {
			break
		}

		// Allocate a value for nil pointers so decoding has somewhere
		// to write.
		if result.IsNil() {
			result.Set(reflect.New(result.Type().Elem()))
		}

		result = result.Elem()
	}
	return result
}
||||
|
||||
var sliceType = reflect.TypeOf([]byte{}) |
||||
|
||||
func (d *decoder) unmarshalBytes(size, offset uint, result reflect.Value) (uint, error) { |
||||
value, newOffset := d.decodeBytes(size, offset) |
||||
|
||||
switch result.Kind() { |
||||
case reflect.Slice: |
||||
if result.Type() == sliceType { |
||||
result.SetBytes(value) |
||||
return newOffset, nil |
||||
} |
||||
case reflect.Interface: |
||||
if result.NumMethod() == 0 { |
||||
result.Set(reflect.ValueOf(value)) |
||||
return newOffset, nil |
||||
} |
||||
} |
||||
return newOffset, newUnmarshalTypeError(value, result.Type()) |
||||
} |
||||
|
||||
func (d *decoder) unmarshalFloat32(size, offset uint, result reflect.Value) (uint, error) { |
||||
if size != 4 { |
||||
return 0, newInvalidDatabaseError( |
||||
"the MaxMind DB file's data section contains bad data (float32 size of %v)", |
||||
size, |
||||
) |
||||
} |
||||
value, newOffset := d.decodeFloat32(size, offset) |
||||
|
||||
switch result.Kind() { |
||||
case reflect.Float32, reflect.Float64: |
||||
result.SetFloat(float64(value)) |
||||
return newOffset, nil |
||||
case reflect.Interface: |
||||
if result.NumMethod() == 0 { |
||||
result.Set(reflect.ValueOf(value)) |
||||
return newOffset, nil |
||||
} |
||||
} |
||||
return newOffset, newUnmarshalTypeError(value, result.Type()) |
||||
} |
||||
|
||||
func (d *decoder) unmarshalFloat64(size, offset uint, result reflect.Value) (uint, error) { |
||||
if size != 8 { |
||||
return 0, newInvalidDatabaseError( |
||||
"the MaxMind DB file's data section contains bad data (float 64 size of %v)", |
||||
size, |
||||
) |
||||
} |
||||
value, newOffset := d.decodeFloat64(size, offset) |
||||
|
||||
switch result.Kind() { |
||||
case reflect.Float32, reflect.Float64: |
||||
if result.OverflowFloat(value) { |
||||
return 0, newUnmarshalTypeError(value, result.Type()) |
||||
} |
||||
result.SetFloat(value) |
||||
return newOffset, nil |
||||
case reflect.Interface: |
||||
if result.NumMethod() == 0 { |
||||
result.Set(reflect.ValueOf(value)) |
||||
return newOffset, nil |
||||
} |
||||
} |
||||
return newOffset, newUnmarshalTypeError(value, result.Type()) |
||||
} |
||||
|
||||
func (d *decoder) unmarshalInt32(size, offset uint, result reflect.Value) (uint, error) { |
||||
if size > 4 { |
||||
return 0, newInvalidDatabaseError( |
||||
"the MaxMind DB file's data section contains bad data (int32 size of %v)", |
||||
size, |
||||
) |
||||
} |
||||
value, newOffset := d.decodeInt(size, offset) |
||||
|
||||
switch result.Kind() { |
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: |
||||
n := int64(value) |
||||
if !result.OverflowInt(n) { |
||||
result.SetInt(n) |
||||
return newOffset, nil |
||||
} |
||||
case reflect.Uint, |
||||
reflect.Uint8, |
||||
reflect.Uint16, |
||||
reflect.Uint32, |
||||
reflect.Uint64, |
||||
reflect.Uintptr: |
||||
n := uint64(value) |
||||
if !result.OverflowUint(n) { |
||||
result.SetUint(n) |
||||
return newOffset, nil |
||||
} |
||||
case reflect.Interface: |
||||
if result.NumMethod() == 0 { |
||||
result.Set(reflect.ValueOf(value)) |
||||
return newOffset, nil |
||||
} |
||||
} |
||||
return newOffset, newUnmarshalTypeError(value, result.Type()) |
||||
} |
||||
|
||||
func (d *decoder) unmarshalMap( |
||||
size uint, |
||||
offset uint, |
||||
result reflect.Value, |
||||
depth int, |
||||
) (uint, error) { |
||||
result = d.indirect(result) |
||||
switch result.Kind() { |
||||
default: |
||||
return 0, newUnmarshalTypeError("map", result.Type()) |
||||
case reflect.Struct: |
||||
return d.decodeStruct(size, offset, result, depth) |
||||
case reflect.Map: |
||||
return d.decodeMap(size, offset, result, depth) |
||||
case reflect.Interface: |
||||
if result.NumMethod() == 0 { |
||||
rv := reflect.ValueOf(make(map[string]interface{}, size)) |
||||
newOffset, err := d.decodeMap(size, offset, rv, depth) |
||||
result.Set(rv) |
||||
return newOffset, err |
||||
} |
||||
return 0, newUnmarshalTypeError("map", result.Type()) |
||||
} |
||||
} |
||||
|
||||
func (d *decoder) unmarshalPointer( |
||||
size, offset uint, |
||||
result reflect.Value, |
||||
depth int, |
||||
) (uint, error) { |
||||
pointer, newOffset, err := d.decodePointer(size, offset) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
_, err = d.decode(pointer, result, depth) |
||||
return newOffset, err |
||||
} |
||||
|
||||
func (d *decoder) unmarshalSlice( |
||||
size uint, |
||||
offset uint, |
||||
result reflect.Value, |
||||
depth int, |
||||
) (uint, error) { |
||||
switch result.Kind() { |
||||
case reflect.Slice: |
||||
return d.decodeSlice(size, offset, result, depth) |
||||
case reflect.Interface: |
||||
if result.NumMethod() == 0 { |
||||
a := []interface{}{} |
||||
rv := reflect.ValueOf(&a).Elem() |
||||
newOffset, err := d.decodeSlice(size, offset, rv, depth) |
||||
result.Set(rv) |
||||
return newOffset, err |
||||
} |
||||
} |
||||
return 0, newUnmarshalTypeError("array", result.Type()) |
||||
} |
||||
|
||||
func (d *decoder) unmarshalString(size, offset uint, result reflect.Value) (uint, error) { |
||||
value, newOffset := d.decodeString(size, offset) |
||||
|
||||
switch result.Kind() { |
||||
case reflect.String: |
||||
result.SetString(value) |
||||
return newOffset, nil |
||||
case reflect.Interface: |
||||
if result.NumMethod() == 0 { |
||||
result.Set(reflect.ValueOf(value)) |
||||
return newOffset, nil |
||||
} |
||||
} |
||||
return newOffset, newUnmarshalTypeError(value, result.Type()) |
||||
} |
||||
|
||||
func (d *decoder) unmarshalUint( |
||||
size, offset uint, |
||||
result reflect.Value, |
||||
uintType uint, |
||||
) (uint, error) { |
||||
if size > uintType/8 { |
||||
return 0, newInvalidDatabaseError( |
||||
"the MaxMind DB file's data section contains bad data (uint%v size of %v)", |
||||
uintType, |
||||
size, |
||||
) |
||||
} |
||||
|
||||
value, newOffset := d.decodeUint(size, offset) |
||||
|
||||
switch result.Kind() { |
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: |
||||
n := int64(value) |
||||
if !result.OverflowInt(n) { |
||||
result.SetInt(n) |
||||
return newOffset, nil |
||||
} |
||||
case reflect.Uint, |
||||
reflect.Uint8, |
||||
reflect.Uint16, |
||||
reflect.Uint32, |
||||
reflect.Uint64, |
||||
reflect.Uintptr: |
||||
if !result.OverflowUint(value) { |
||||
result.SetUint(value) |
||||
return newOffset, nil |
||||
} |
||||
case reflect.Interface: |
||||
if result.NumMethod() == 0 { |
||||
result.Set(reflect.ValueOf(value)) |
||||
return newOffset, nil |
||||
} |
||||
} |
||||
return newOffset, newUnmarshalTypeError(value, result.Type()) |
||||
} |
||||
|
||||
var bigIntType = reflect.TypeOf(big.Int{}) |
||||
|
||||
func (d *decoder) unmarshalUint128(size, offset uint, result reflect.Value) (uint, error) { |
||||
if size > 16 { |
||||
return 0, newInvalidDatabaseError( |
||||
"the MaxMind DB file's data section contains bad data (uint128 size of %v)", |
||||
size, |
||||
) |
||||
} |
||||
value, newOffset := d.decodeUint128(size, offset) |
||||
|
||||
switch result.Kind() { |
||||
case reflect.Struct: |
||||
if result.Type() == bigIntType { |
||||
result.Set(reflect.ValueOf(*value)) |
||||
return newOffset, nil |
||||
} |
||||
case reflect.Interface: |
||||
if result.NumMethod() == 0 { |
||||
result.Set(reflect.ValueOf(value)) |
||||
return newOffset, nil |
||||
} |
||||
} |
||||
return newOffset, newUnmarshalTypeError(value, result.Type()) |
||||
} |
||||
|
||||
func (d *decoder) decodeBool(size, offset uint) (bool, uint) { |
||||
return size != 0, offset |
||||
} |
||||
|
||||
func (d *decoder) decodeBytes(size, offset uint) ([]byte, uint) { |
||||
newOffset := offset + size |
||||
bytes := make([]byte, size) |
||||
copy(bytes, d.buffer[offset:newOffset]) |
||||
return bytes, newOffset |
||||
} |
||||
|
||||
func (d *decoder) decodeFloat64(size, offset uint) (float64, uint) { |
||||
newOffset := offset + size |
||||
bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset]) |
||||
return math.Float64frombits(bits), newOffset |
||||
} |
||||
|
||||
func (d *decoder) decodeFloat32(size, offset uint) (float32, uint) { |
||||
newOffset := offset + size |
||||
bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset]) |
||||
return math.Float32frombits(bits), newOffset |
||||
} |
||||
|
||||
func (d *decoder) decodeInt(size, offset uint) (int, uint) { |
||||
newOffset := offset + size |
||||
var val int32 |
||||
for _, b := range d.buffer[offset:newOffset] { |
||||
val = (val << 8) | int32(b) |
||||
} |
||||
return int(val), newOffset |
||||
} |
||||
|
||||
func (d *decoder) decodeMap( |
||||
size uint, |
||||
offset uint, |
||||
result reflect.Value, |
||||
depth int, |
||||
) (uint, error) { |
||||
if result.IsNil() { |
||||
result.Set(reflect.MakeMapWithSize(result.Type(), int(size))) |
||||
} |
||||
|
||||
mapType := result.Type() |
||||
keyValue := reflect.New(mapType.Key()).Elem() |
||||
elemType := mapType.Elem() |
||||
elemKind := elemType.Kind() |
||||
var elemValue reflect.Value |
||||
for i := uint(0); i < size; i++ { |
||||
var key []byte |
||||
var err error |
||||
key, offset, err = d.decodeKey(offset) |
||||
|
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
|
||||
if !elemValue.IsValid() || elemKind == reflect.Interface { |
||||
elemValue = reflect.New(elemType).Elem() |
||||
} |
||||
|
||||
offset, err = d.decode(offset, elemValue, depth) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
|
||||
keyValue.SetString(string(key)) |
||||
result.SetMapIndex(keyValue, elemValue) |
||||
} |
||||
return offset, nil |
||||
} |
||||
|
||||
func (d *decoder) decodeMapToDeserializer( |
||||
size uint, |
||||
offset uint, |
||||
dser deserializer, |
||||
depth int, |
||||
) (uint, error) { |
||||
err := dser.StartMap(size) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
for i := uint(0); i < size; i++ { |
||||
// TODO - implement key/value skipping?
|
||||
offset, err = d.decodeToDeserializer(offset, dser, depth, true) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
|
||||
offset, err = d.decodeToDeserializer(offset, dser, depth, true) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
} |
||||
err = dser.End() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
return offset, nil |
||||
} |
||||
|
||||
func (d *decoder) decodePointer( |
||||
size uint, |
||||
offset uint, |
||||
) (uint, uint, error) { |
||||
pointerSize := ((size >> 3) & 0x3) + 1 |
||||
newOffset := offset + pointerSize |
||||
if newOffset > uint(len(d.buffer)) { |
||||
return 0, 0, newOffsetError() |
||||
} |
||||
pointerBytes := d.buffer[offset:newOffset] |
||||
var prefix uint |
||||
if pointerSize == 4 { |
||||
prefix = 0 |
||||
} else { |
||||
prefix = size & 0x7 |
||||
} |
||||
unpacked := uintFromBytes(prefix, pointerBytes) |
||||
|
||||
var pointerValueOffset uint |
||||
switch pointerSize { |
||||
case 1: |
||||
pointerValueOffset = 0 |
||||
case 2: |
||||
pointerValueOffset = 2048 |
||||
case 3: |
||||
pointerValueOffset = 526336 |
||||
case 4: |
||||
pointerValueOffset = 0 |
||||
} |
||||
|
||||
pointer := unpacked + pointerValueOffset |
||||
|
||||
return pointer, newOffset, nil |
||||
} |
||||
|
||||
func (d *decoder) decodeSlice( |
||||
size uint, |
||||
offset uint, |
||||
result reflect.Value, |
||||
depth int, |
||||
) (uint, error) { |
||||
result.Set(reflect.MakeSlice(result.Type(), int(size), int(size))) |
||||
for i := 0; i < int(size); i++ { |
||||
var err error |
||||
offset, err = d.decode(offset, result.Index(i), depth) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
} |
||||
return offset, nil |
||||
} |
||||
|
||||
func (d *decoder) decodeSliceToDeserializer( |
||||
size uint, |
||||
offset uint, |
||||
dser deserializer, |
||||
depth int, |
||||
) (uint, error) { |
||||
err := dser.StartSlice(size) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
for i := uint(0); i < size; i++ { |
||||
offset, err = d.decodeToDeserializer(offset, dser, depth, true) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
} |
||||
err = dser.End() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
return offset, nil |
||||
} |
||||
|
||||
func (d *decoder) decodeString(size, offset uint) (string, uint) { |
||||
newOffset := offset + size |
||||
return string(d.buffer[offset:newOffset]), newOffset |
||||
} |
||||
|
||||
func (d *decoder) decodeStruct( |
||||
size uint, |
||||
offset uint, |
||||
result reflect.Value, |
||||
depth int, |
||||
) (uint, error) { |
||||
fields := cachedFields(result) |
||||
|
||||
// This fills in embedded structs
|
||||
for _, i := range fields.anonymousFields { |
||||
_, err := d.unmarshalMap(size, offset, result.Field(i), depth) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
} |
||||
|
||||
// This handles named fields
|
||||
for i := uint(0); i < size; i++ { |
||||
var ( |
||||
err error |
||||
key []byte |
||||
) |
||||
key, offset, err = d.decodeKey(offset) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
// The string() does not create a copy due to this compiler
|
||||
// optimization: https://github.com/golang/go/issues/3512
|
||||
j, ok := fields.namedFields[string(key)] |
||||
if !ok { |
||||
offset, err = d.nextValueOffset(offset, 1) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
continue |
||||
} |
||||
|
||||
offset, err = d.decode(offset, result.Field(j), depth) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
} |
||||
return offset, nil |
||||
} |
||||
|
||||
// fieldsType caches, per struct type, the mapping from database key to
// field index plus the indices of embedded (anonymous) fields.
type fieldsType struct {
	namedFields     map[string]int
	anonymousFields []int
}

// fieldsMap caches *fieldsType values keyed by reflect.Type.
var fieldsMap sync.Map

// cachedFields returns the field lookup tables for result's type,
// computing and caching them on first use.
func cachedFields(result reflect.Value) *fieldsType {
	resultType := result.Type()

	if cached, ok := fieldsMap.Load(resultType); ok {
		return cached.(*fieldsType)
	}

	numFields := resultType.NumField()
	named := make(map[string]int, numFields)
	var anonymous []int
	for i := 0; i < numFields; i++ {
		field := resultType.Field(i)

		// A maxminddb tag overrides the field name; "-" excludes the
		// field entirely.
		name := field.Name
		if tag := field.Tag.Get("maxminddb"); tag != "" {
			if tag == "-" {
				continue
			}
			name = tag
		}
		if field.Anonymous {
			anonymous = append(anonymous, i)
			continue
		}
		named[name] = i
	}
	fields := &fieldsType{named, anonymous}
	fieldsMap.Store(resultType, fields)

	return fields
}
||||
|
||||
func (d *decoder) decodeUint(size, offset uint) (uint64, uint) { |
||||
newOffset := offset + size |
||||
bytes := d.buffer[offset:newOffset] |
||||
|
||||
var val uint64 |
||||
for _, b := range bytes { |
||||
val = (val << 8) | uint64(b) |
||||
} |
||||
return val, newOffset |
||||
} |
||||
|
||||
func (d *decoder) decodeUint128(size, offset uint) (*big.Int, uint) { |
||||
newOffset := offset + size |
||||
val := new(big.Int) |
||||
val.SetBytes(d.buffer[offset:newOffset]) |
||||
|
||||
return val, newOffset |
||||
} |
||||
|
||||
// uintFromBytes folds the big-endian bytes of uintBytes into prefix,
// shifting the accumulator left by 8 bits per byte consumed.
func uintFromBytes(prefix uint, uintBytes []byte) uint {
	value := prefix
	for _, b := range uintBytes {
		value = value<<8 | uint(b)
	}
	return value
}
||||
|
||||
// decodeKey decodes a map key into []byte slice. We use a []byte so that we
|
||||
// can take advantage of https://github.com/golang/go/issues/3512 to avoid
|
||||
// copying the bytes when decoding a struct. Previously, we achieved this by
|
||||
// using unsafe.
|
||||
func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) { |
||||
typeNum, size, dataOffset, err := d.decodeCtrlData(offset) |
||||
if err != nil { |
||||
return nil, 0, err |
||||
} |
||||
if typeNum == _Pointer { |
||||
pointer, ptrOffset, err := d.decodePointer(size, dataOffset) |
||||
if err != nil { |
||||
return nil, 0, err |
||||
} |
||||
key, _, err := d.decodeKey(pointer) |
||||
return key, ptrOffset, err |
||||
} |
||||
if typeNum != _String { |
||||
return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum) |
||||
} |
||||
newOffset := dataOffset + size |
||||
if newOffset > uint(len(d.buffer)) { |
||||
return nil, 0, newOffsetError() |
||||
} |
||||
return d.buffer[dataOffset:newOffset], newOffset, nil |
||||
} |
||||
|
||||
// This function is used to skip ahead to the next value without decoding
|
||||
// the one at the offset passed in. The size bits have different meanings for
|
||||
// different data types.
|
||||
func (d *decoder) nextValueOffset(offset, numberToSkip uint) (uint, error) { |
||||
if numberToSkip == 0 { |
||||
return offset, nil |
||||
} |
||||
typeNum, size, offset, err := d.decodeCtrlData(offset) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
switch typeNum { |
||||
case _Pointer: |
||||
_, offset, err = d.decodePointer(size, offset) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
case _Map: |
||||
numberToSkip += 2 * size |
||||
case _Slice: |
||||
numberToSkip += size |
||||
case _Bool: |
||||
default: |
||||
offset += size |
||||
} |
||||
return d.nextValueOffset(offset, numberToSkip-1) |
||||
} |
||||
@ -0,0 +1,31 @@ |
||||
package maxminddb |
||||
|
||||
import "math/big" |
||||
|
||||
// deserializer is an interface for a type that deserializes an MaxMind DB
// data record to some other type. This exists as an alternative to the
// standard reflection API.
//
// This is fundamentally different than the Unmarshaler interface that
// several packages provide. A Deserializer will generally create the
// final struct or value rather than unmarshaling to itself.
//
// This interface and the associated unmarshaling code is EXPERIMENTAL!
// It is not currently covered by any Semantic Versioning guarantees.
// Use at your own risk.
type deserializer interface {
	// ShouldSkip reports whether the record at offset can be skipped
	// without being decoded.
	ShouldSkip(offset uintptr) (bool, error)
	// StartSlice signals the beginning of an array with size elements.
	StartSlice(size uint) error
	// StartMap signals the beginning of a map with size key/value pairs.
	StartMap(size uint) error
	// End closes the most recently started slice or map.
	End() error
	// The remaining methods receive one decoded scalar value each.
	String(string) error
	Float64(float64) error
	Bytes([]byte) error
	Uint16(uint16) error
	Uint32(uint32) error
	Int32(int32) error
	Uint64(uint64) error
	Uint128(*big.Int) error
	Bool(bool) error
	Float32(float32) error
}
||||
@ -0,0 +1,42 @@ |
||||
package maxminddb |
||||
|
||||
import ( |
||||
"fmt" |
||||
"reflect" |
||||
) |
||||
|
||||
// InvalidDatabaseError is returned when the database contains invalid data
// and cannot be parsed.
type InvalidDatabaseError struct {
	message string
}

// newOffsetError reports a read past the end of the database buffer.
func newOffsetError() InvalidDatabaseError {
	return InvalidDatabaseError{message: "unexpected end of database"}
}

// newInvalidDatabaseError builds an InvalidDatabaseError from a
// printf-style format string and arguments.
func newInvalidDatabaseError(format string, args ...interface{}) InvalidDatabaseError {
	return InvalidDatabaseError{message: fmt.Sprintf(format, args...)}
}

// Error implements the error interface.
func (e InvalidDatabaseError) Error() string {
	return e.message
}

// UnmarshalTypeError is returned when the value in the database cannot be
// assigned to the specified data type.
type UnmarshalTypeError struct {
	Value string       // stringified copy of the database value that caused the error
	Type  reflect.Type // type of the value that could not be assign to
}

// newUnmarshalTypeError records a stringified copy of the offending
// value together with the destination type.
func newUnmarshalTypeError(value interface{}, rType reflect.Type) UnmarshalTypeError {
	return UnmarshalTypeError{
		Value: fmt.Sprintf("%v", value),
		Type:  rType,
	}
}

// Error implements the error interface.
func (e UnmarshalTypeError) Error() string {
	return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type.String())
}
||||
@ -0,0 +1,16 @@ |
||||
//go:build !windows && !appengine && !plan9
|
||||
// +build !windows,!appengine,!plan9
|
||||
|
||||
package maxminddb |
||||
|
||||
import ( |
||||
"golang.org/x/sys/unix" |
||||
) |
||||
|
||||
// mmap maps length bytes of the file referred to by fd into memory as a
// read-only shared mapping.
func mmap(fd, length int) (data []byte, err error) {
	return unix.Mmap(fd, 0, length, unix.PROT_READ, unix.MAP_SHARED)
}

// munmap releases a mapping previously created by mmap.
func munmap(b []byte) (err error) {
	return unix.Munmap(b)
}
||||
@ -0,0 +1,85 @@ |
||||
// +build windows,!appengine
|
||||
|
||||
package maxminddb |
||||
|
||||
// Windows support largely borrowed from mmap-go.
|
||||
//
|
||||
// Copyright 2011 Evan Shaw. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
import ( |
||||
"errors" |
||||
"os" |
||||
"reflect" |
||||
"sync" |
||||
"unsafe" |
||||
|
||||
"golang.org/x/sys/windows" |
||||
) |
||||
|
||||
// memoryMap is a []byte whose backing storage is a Windows
// file-mapping view rather than Go-managed memory.
type memoryMap []byte

// Windows
// handleLock guards handleMap, which remembers the file-mapping handle
// for each mapped base address so munmap can close it later.
var handleLock sync.Mutex
var handleMap = map[uintptr]windows.Handle{}

// mmap maps length bytes of fd into memory read-only, recording the
// mapping handle keyed by base address for later cleanup in munmap.
func mmap(fd int, length int) (data []byte, err error) {
	h, errno := windows.CreateFileMapping(windows.Handle(fd), nil,
		uint32(windows.PAGE_READONLY), 0, uint32(length), nil)
	if h == 0 {
		return nil, os.NewSyscallError("CreateFileMapping", errno)
	}

	addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0,
		0, uintptr(length))
	if addr == 0 {
		return nil, os.NewSyscallError("MapViewOfFile", errno)
	}
	handleLock.Lock()
	handleMap[addr] = h
	handleLock.Unlock()

	// Construct a []byte whose slice header points directly at the
	// mapped view.
	m := memoryMap{}
	dh := m.header()
	dh.Data = addr
	dh.Len = length
	dh.Cap = dh.Len

	return m, nil
}

// header exposes the slice header so the mapping address can be
// installed directly.
func (m *memoryMap) header() *reflect.SliceHeader {
	return (*reflect.SliceHeader)(unsafe.Pointer(m))
}

// flush writes any dirty pages of the view in [addr, addr+len) back to
// the underlying file.
func flush(addr, len uintptr) error {
	errno := windows.FlushViewOfFile(addr, len)
	return os.NewSyscallError("FlushViewOfFile", errno)
}

// munmap unmaps b and closes the file-mapping handle recorded for its
// base address.
func munmap(b []byte) (err error) {
	m := memoryMap(b)
	dh := m.header()

	addr := dh.Data
	length := uintptr(dh.Len)

	// NOTE(review): the flush error is discarded here; presumably a
	// read-only mapping has nothing to write back — confirm.
	flush(addr, length)
	err = windows.UnmapViewOfFile(addr)
	if err != nil {
		return err
	}

	handleLock.Lock()
	defer handleLock.Unlock()
	handle, ok := handleMap[addr]
	if !ok {
		// should be impossible; we would've errored above
		return errors.New("unknown base address")
	}
	delete(handleMap, addr)

	e := windows.CloseHandle(windows.Handle(handle))
	return os.NewSyscallError("CloseHandle", e)
}
||||
@ -0,0 +1,58 @@ |
||||
package maxminddb |
||||
|
||||
type nodeReader interface { |
||||
readLeft(uint) uint |
||||
readRight(uint) uint |
||||
} |
||||
|
||||
// nodeReader24 reads search-tree records from a database whose nodes
// store two 24-bit (3-byte) big-endian record values.
type nodeReader24 struct {
	buffer []byte
}

// readLeft returns the left (first) record of the node starting at
// nodeNumber.
func (n nodeReader24) readLeft(nodeNumber uint) uint {
	b := n.buffer[nodeNumber : nodeNumber+3]
	return uint(b[0])<<16 | uint(b[1])<<8 | uint(b[2])
}

// readRight returns the right (second) record of the node starting at
// nodeNumber.
func (n nodeReader24) readRight(nodeNumber uint) uint {
	b := n.buffer[nodeNumber+3 : nodeNumber+6]
	return uint(b[0])<<16 | uint(b[1])<<8 | uint(b[2])
}
||||
|
||||
// nodeReader28 reads search-tree records from a database whose nodes
// store two 28-bit record values; the middle byte carries the high
// nibble of each record.
type nodeReader28 struct {
	buffer []byte
}

// readLeft returns the left (first) record of the node starting at
// nodeNumber; the high nibble of byte 3 supplies its top four bits.
func (n nodeReader28) readLeft(nodeNumber uint) uint {
	b := n.buffer
	return (uint(b[nodeNumber+3])&0xF0)<<20 |
		uint(b[nodeNumber])<<16 |
		uint(b[nodeNumber+1])<<8 |
		uint(b[nodeNumber+2])
}

// readRight returns the right (second) record of the node starting at
// nodeNumber; the low nibble of byte 3 supplies its top four bits.
func (n nodeReader28) readRight(nodeNumber uint) uint {
	b := n.buffer
	return (uint(b[nodeNumber+3])&0x0F)<<24 |
		uint(b[nodeNumber+4])<<16 |
		uint(b[nodeNumber+5])<<8 |
		uint(b[nodeNumber+6])
}
||||
|
||||
// nodeReader32 decodes search-tree nodes with 32-bit records: each node is
// 8 bytes, left record first, right record second, big-endian.
type nodeReader32 struct {
	buffer []byte
}

// readLeft returns the 32-bit left record of the node at byte offset nodeNumber.
func (n nodeReader32) readLeft(nodeNumber uint) uint {
	b := n.buffer[nodeNumber : nodeNumber+4]
	return uint(b[0])<<24 | uint(b[1])<<16 | uint(b[2])<<8 | uint(b[3])
}

// readRight returns the 32-bit right record of the node at byte offset nodeNumber.
func (n nodeReader32) readRight(nodeNumber uint) uint {
	b := n.buffer[nodeNumber+4 : nodeNumber+8]
	return uint(b[0])<<24 | uint(b[1])<<16 | uint(b[2])<<8 | uint(b[3])
}
||||
@ -0,0 +1,310 @@ |
||||
// Package maxminddb provides a reader for the MaxMind DB file format.
|
||||
package maxminddb |
||||
|
||||
import ( |
||||
"bytes" |
||||
"errors" |
||||
"fmt" |
||||
"net" |
||||
"reflect" |
||||
) |
||||
|
||||
const ( |
||||
// NotFound is returned by LookupOffset when a matched root record offset
|
||||
// cannot be found.
|
||||
NotFound = ^uintptr(0) |
||||
|
||||
dataSectionSeparatorSize = 16 |
||||
) |
||||
|
||||
var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com") |
||||
|
||||
// Reader holds the data corresponding to the MaxMind DB file. Its only public
|
||||
// field is Metadata, which contains the metadata from the MaxMind DB file.
|
||||
//
|
||||
// All of the methods on Reader are thread-safe. The struct may be safely
|
||||
// shared across goroutines.
|
||||
type Reader struct {
	hasMappedFile     bool       // true when buffer is a live mmap owned by this Reader
	buffer            []byte     // the entire database file; nil after Close
	nodeReader        nodeReader // record-size-specific search-tree decoder
	decoder           decoder    // decoder positioned over the data section
	Metadata          Metadata
	ipv4Start         uint // node where the IPv4 subtree begins (IPv6 databases only)
	ipv4StartBitDepth int  // bit depth at which ipv4Start was reached
	nodeOffsetMult    uint // bytes per search-tree node: RecordSize / 4
}
||||
|
||||
// Metadata holds the metadata decoded from the MaxMind DB file. In particular
|
||||
// it has the format version, the build time as Unix epoch time, the database
|
||||
// type and description, the IP version supported, and a slice of the natural
|
||||
// languages included.
|
||||
type Metadata struct {
	BinaryFormatMajorVersion uint              `maxminddb:"binary_format_major_version"`
	BinaryFormatMinorVersion uint              `maxminddb:"binary_format_minor_version"`
	BuildEpoch               uint              `maxminddb:"build_epoch"` // build time, Unix epoch seconds
	DatabaseType             string            `maxminddb:"database_type"`
	Description              map[string]string `maxminddb:"description"` // language code -> description
	IPVersion                uint              `maxminddb:"ip_version"`  // 4 or 6
	Languages                []string          `maxminddb:"languages"`
	NodeCount                uint              `maxminddb:"node_count"`  // number of search-tree nodes
	RecordSize               uint              `maxminddb:"record_size"` // bits per record: 24, 28, or 32
}
||||
|
||||
// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
|
||||
// a Reader structure or an error.
|
||||
func FromBytes(buffer []byte) (*Reader, error) { |
||||
metadataStart := bytes.LastIndex(buffer, metadataStartMarker) |
||||
|
||||
if metadataStart == -1 { |
||||
return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file") |
||||
} |
||||
|
||||
metadataStart += len(metadataStartMarker) |
||||
metadataDecoder := decoder{buffer[metadataStart:]} |
||||
|
||||
var metadata Metadata |
||||
|
||||
rvMetdata := reflect.ValueOf(&metadata) |
||||
_, err := metadataDecoder.decode(0, rvMetdata, 0) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4 |
||||
dataSectionStart := searchTreeSize + dataSectionSeparatorSize |
||||
dataSectionEnd := uint(metadataStart - len(metadataStartMarker)) |
||||
if dataSectionStart > dataSectionEnd { |
||||
return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata") |
||||
} |
||||
d := decoder{ |
||||
buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)], |
||||
} |
||||
|
||||
nodeBuffer := buffer[:searchTreeSize] |
||||
var nodeReader nodeReader |
||||
switch metadata.RecordSize { |
||||
case 24: |
||||
nodeReader = nodeReader24{buffer: nodeBuffer} |
||||
case 28: |
||||
nodeReader = nodeReader28{buffer: nodeBuffer} |
||||
case 32: |
||||
nodeReader = nodeReader32{buffer: nodeBuffer} |
||||
default: |
||||
return nil, newInvalidDatabaseError("unknown record size: %d", metadata.RecordSize) |
||||
} |
||||
|
||||
reader := &Reader{ |
||||
buffer: buffer, |
||||
nodeReader: nodeReader, |
||||
decoder: d, |
||||
Metadata: metadata, |
||||
ipv4Start: 0, |
||||
nodeOffsetMult: metadata.RecordSize / 4, |
||||
} |
||||
|
||||
reader.setIPv4Start() |
||||
|
||||
return reader, err |
||||
} |
||||
|
||||
// setIPv4Start locates the root of the IPv4 subtree in an IPv6 database by
// following 96 zero bits (the ::/96 prefix) from the tree root, caching the
// resulting node and the bit depth actually reached. IPv4-only databases
// need no adjustment, so the method returns immediately for them.
func (r *Reader) setIPv4Start() {
	if r.Metadata.IPVersion != 6 {
		return
	}

	nodeCount := r.Metadata.NodeCount

	node := uint(0)
	i := 0
	// The walk may stop before 96 bits if it hits a record; see the
	// comment in cidr about shallow IPv4 subtrees.
	for ; i < 96 && node < nodeCount; i++ {
		node = r.nodeReader.readLeft(node * r.nodeOffsetMult)
	}
	r.ipv4Start = node
	r.ipv4StartBitDepth = i
}
||||
|
||||
// Lookup retrieves the database record for ip and stores it in the value
|
||||
// pointed to by result. If result is nil or not a pointer, an error is
|
||||
// returned. If the data in the database record cannot be stored in result
|
||||
// because of type differences, an UnmarshalTypeError is returned. If the
|
||||
// database is invalid or otherwise cannot be read, an InvalidDatabaseError
|
||||
// is returned.
|
||||
func (r *Reader) Lookup(ip net.IP, result interface{}) error { |
||||
if r.buffer == nil { |
||||
return errors.New("cannot call Lookup on a closed database") |
||||
} |
||||
pointer, _, _, err := r.lookupPointer(ip) |
||||
if pointer == 0 || err != nil { |
||||
return err |
||||
} |
||||
return r.retrieveData(pointer, result) |
||||
} |
||||
|
||||
// LookupNetwork retrieves the database record for ip and stores it in the
|
||||
// value pointed to by result. The network returned is the network associated
|
||||
// with the data record in the database. The ok return value indicates whether
|
||||
// the database contained a record for the ip.
|
||||
//
|
||||
// If result is nil or not a pointer, an error is returned. If the data in the
|
||||
// database record cannot be stored in result because of type differences, an
|
||||
// UnmarshalTypeError is returned. If the database is invalid or otherwise
|
||||
// cannot be read, an InvalidDatabaseError is returned.
|
||||
func (r *Reader) LookupNetwork( |
||||
ip net.IP, |
||||
result interface{}, |
||||
) (network *net.IPNet, ok bool, err error) { |
||||
if r.buffer == nil { |
||||
return nil, false, errors.New("cannot call Lookup on a closed database") |
||||
} |
||||
pointer, prefixLength, ip, err := r.lookupPointer(ip) |
||||
|
||||
network = r.cidr(ip, prefixLength) |
||||
if pointer == 0 || err != nil { |
||||
return network, false, err |
||||
} |
||||
|
||||
return network, true, r.retrieveData(pointer, result) |
||||
} |
||||
|
||||
// LookupOffset maps an argument net.IP to a corresponding record offset in the
|
||||
// database. NotFound is returned if no such record is found, and a record may
|
||||
// otherwise be extracted by passing the returned offset to Decode. LookupOffset
|
||||
// is an advanced API, which exists to provide clients with a means to cache
|
||||
// previously-decoded records.
|
||||
func (r *Reader) LookupOffset(ip net.IP) (uintptr, error) { |
||||
if r.buffer == nil { |
||||
return 0, errors.New("cannot call LookupOffset on a closed database") |
||||
} |
||||
pointer, _, _, err := r.lookupPointer(ip) |
||||
if pointer == 0 || err != nil { |
||||
return NotFound, err |
||||
} |
||||
return r.resolveDataPointer(pointer) |
||||
} |
||||
|
||||
// cidr builds the *net.IPNet for the network matched at prefixLength during
// a traversal that used ip.
func (r *Reader) cidr(ip net.IP, prefixLength int) *net.IPNet {
	// This is necessary as the node that the IPv4 start is at may
	// be at a bit depth that is less than 96, i.e., ipv4Start points
	// to a leaf node. For instance, if a record was inserted at ::/8,
	// the ipv4Start would point directly at the leaf node for the
	// record and would have a bit depth of 8. This would not happen
	// with databases currently distributed by MaxMind as all of them
	// have an IPv4 subtree that is greater than a single node.
	if r.Metadata.IPVersion == 6 &&
		len(ip) == net.IPv4len &&
		r.ipv4StartBitDepth != 96 {
		return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(r.ipv4StartBitDepth, 128)}
	}

	mask := net.CIDRMask(prefixLength, len(ip)*8)
	return &net.IPNet{IP: ip.Mask(mask), Mask: mask}
}
||||
|
||||
// Decode the record at |offset| into |result|. The result value pointed to
|
||||
// must be a data value that corresponds to a record in the database. This may
|
||||
// include a struct representation of the data, a map capable of holding the
|
||||
// data or an empty interface{} value.
|
||||
//
|
||||
// If result is a pointer to a struct, the struct need not include a field
|
||||
// for every value that may be in the database. If a field is not present in
|
||||
// the structure, the decoder will not decode that field, reducing the time
|
||||
// required to decode the record.
|
||||
//
|
||||
// As a special case, a struct field of type uintptr will be used to capture
|
||||
// the offset of the value. Decode may later be used to extract the stored
|
||||
// value from the offset. MaxMind DBs are highly normalized: for example in
|
||||
// the City database, all records of the same country will reference a
|
||||
// single representative record for that country. This uintptr behavior allows
|
||||
// clients to leverage this normalization in their own sub-record caching.
|
||||
func (r *Reader) Decode(offset uintptr, result interface{}) error { |
||||
if r.buffer == nil { |
||||
return errors.New("cannot call Decode on a closed database") |
||||
} |
||||
return r.decode(offset, result) |
||||
} |
||||
|
||||
// decode validates that result is a non-nil pointer and then decodes the
// value at offset into it, taking the deserializer fast path when result
// implements that interface.
func (r *Reader) decode(offset uintptr, result interface{}) error {
	rv := reflect.ValueOf(result)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return errors.New("result param must be a pointer")
	}

	if dser, ok := result.(deserializer); ok {
		_, err := r.decoder.decodeToDeserializer(uint(offset), dser, 0, false)
		return err
	}

	_, err := r.decoder.decode(uint(offset), rv, 0)
	return err
}
||||
|
||||
// lookupPointer traverses the search tree for ip and returns the record
// pointer, the prefix length of the matched network, the (possibly
// 4-byte-normalized) IP actually used for the traversal, and any error.
// A zero pointer with a nil error means the tree has no record for ip.
func (r *Reader) lookupPointer(ip net.IP) (uint, int, net.IP, error) {
	if ip == nil {
		return 0, 0, nil, errors.New("IP passed to Lookup cannot be nil")
	}

	// Normalize IPv4 and IPv4-mapped addresses to the 4-byte form.
	ipV4Address := ip.To4()
	if ipV4Address != nil {
		ip = ipV4Address
	}
	if len(ip) == 16 && r.Metadata.IPVersion == 4 {
		return 0, 0, ip, fmt.Errorf(
			"error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database",
			ip.String(),
		)
	}

	bitCount := uint(len(ip) * 8)

	// For a 4-byte address in an IPv6 tree, start the walk at the cached
	// IPv4 subtree root rather than the tree root.
	var node uint
	if bitCount == 32 {
		node = r.ipv4Start
	}
	node, prefixLength := r.traverseTree(ip, node, bitCount)

	nodeCount := r.Metadata.NodeCount
	if node == nodeCount {
		// Record is empty
		return 0, prefixLength, ip, nil
	} else if node > nodeCount {
		// Values above NodeCount point into the data section.
		return node, prefixLength, ip, nil
	}

	// node < nodeCount: the traversal ended inside the tree, which
	// indicates a corrupt database.
	return 0, prefixLength, ip, newInvalidDatabaseError("invalid node in search tree")
}
||||
|
||||
// traverseTree walks the search tree starting at node, following the bits of
// ip for at most bitCount steps. It returns the final node value and the
// number of bits consumed (the prefix length of the match). The walk stops
// early when the node value reaches or exceeds NodeCount (a record or data
// pointer).
func (r *Reader) traverseTree(ip net.IP, node, bitCount uint) (uint, int) {
	nodeCount := r.Metadata.NodeCount

	i := uint(0)
	for ; i < bitCount && node < nodeCount; i++ {
		// Bit i of the address, most significant bit first.
		bit := uint(1) & (uint(ip[i>>3]) >> (7 - (i % 8)))

		offset := node * r.nodeOffsetMult
		if bit == 0 {
			node = r.nodeReader.readLeft(offset)
		} else {
			node = r.nodeReader.readRight(offset)
		}
	}

	return node, int(i)
}
||||
|
||||
func (r *Reader) retrieveData(pointer uint, result interface{}) error { |
||||
offset, err := r.resolveDataPointer(pointer) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
return r.decode(offset, result) |
||||
} |
||||
|
||||
// resolveDataPointer converts a search-tree record value into an offset into
// the data section. Callers pass values greater than NodeCount (see
// lookupPointer), so the subtraction does not underflow for valid input.
func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
	resolved := uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)

	// NOTE(review): this bounds check is against the whole file buffer, a
	// loose upper bound on the data section's length.
	if resolved >= uintptr(len(r.buffer)) {
		return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
	}
	return resolved, nil
}
||||
@ -0,0 +1,28 @@ |
||||
// +build appengine plan9
|
||||
|
||||
package maxminddb |
||||
|
||||
import "io/ioutil" |
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
|
||||
// structure or an error. The database file is opened using a memory map,
|
||||
// except on Google App Engine where mmap is not supported; there the database
|
||||
// is loaded into memory. Use the Close method on the Reader object to return
|
||||
// the resources to the system.
|
||||
func Open(file string) (*Reader, error) { |
||||
bytes, err := ioutil.ReadFile(file) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
return FromBytes(bytes) |
||||
} |
||||
|
||||
// Close unmaps the database file from virtual memory and returns the
// resources to the system. If called on a Reader opened using FromBytes
// or Open on Google App Engine, this method sets the underlying buffer
// to nil, returning the resources to the system.
func (r *Reader) Close() error {
	// A nil buffer makes subsequent Lookup/Decode calls fail fast with a
	// "closed database" error.
	r.buffer = nil
	return nil
}
||||
@ -0,0 +1,66 @@ |
||||
//go:build !appengine && !plan9
|
||||
// +build !appengine,!plan9
|
||||
|
||||
package maxminddb |
||||
|
||||
import ( |
||||
"os" |
||||
"runtime" |
||||
) |
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
// structure or an error. The database file is opened using a memory map,
// except on Google App Engine where mmap is not supported; there the database
// is loaded into memory. Use the Close method on the Reader object to return
// the resources to the system.
func Open(file string) (*Reader, error) {
	mapFile, err := os.Open(file)
	if err != nil {
		// mapFile is nil here; Close on a nil *os.File returns
		// ErrInvalid rather than panicking, and its result is ignored.
		_ = mapFile.Close()
		return nil, err
	}

	stats, err := mapFile.Stat()
	if err != nil {
		_ = mapFile.Close()
		return nil, err
	}

	fileSize := int(stats.Size())
	mmap, err := mmap(int(mapFile.Fd()), fileSize)
	if err != nil {
		_ = mapFile.Close()
		return nil, err
	}

	// The mapping stays valid after the descriptor is closed; a close
	// failure still forces cleanup of the mapping.
	if err := mapFile.Close(); err != nil {
		//nolint:errcheck // we prefer to return the original error
		munmap(mmap)
		return nil, err
	}

	reader, err := FromBytes(mmap)
	if err != nil {
		//nolint:errcheck // we prefer to return the original error
		munmap(mmap)
		return nil, err
	}

	// Unmap automatically if the caller forgets to call Close.
	reader.hasMappedFile = true
	runtime.SetFinalizer(reader, (*Reader).Close)
	return reader, nil
}
||||
|
||||
// Close unmaps the database file from virtual memory and returns the
// resources to the system. If called on a Reader opened using FromBytes
// or Open on Google App Engine, this method does nothing.
func (r *Reader) Close() error {
	var err error
	if r.hasMappedFile {
		// Clear the finalizer so the GC does not run Close a second time.
		runtime.SetFinalizer(r, nil)
		r.hasMappedFile = false
		err = munmap(r.buffer)
	}
	// A nil buffer makes subsequent Lookup/Decode calls fail fast.
	r.buffer = nil
	return err
}
||||
@ -0,0 +1,205 @@ |
||||
package maxminddb |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net" |
||||
) |
||||
|
||||
// Internal structure used to keep track of nodes we still need to visit.
type netNode struct {
	ip      net.IP // network prefix accumulated so far
	bit     uint   // number of prefix bits fixed in ip
	pointer uint   // search-tree node value (or data pointer) for this prefix
}
||||
|
||||
// Networks represents a set of subnets that we are iterating over.
type Networks struct {
	reader   *Reader
	nodes    []netNode // Nodes we still have to visit.
	lastNode netNode   // node most recently produced by Next
	err      error     // sticky error; stops iteration once set

	skipAliasedNetworks bool // set via the SkipAliasedNetworks option
}
||||
|
||||
var ( |
||||
allIPv4 = &net.IPNet{IP: make(net.IP, 4), Mask: net.CIDRMask(0, 32)} |
||||
allIPv6 = &net.IPNet{IP: make(net.IP, 16), Mask: net.CIDRMask(0, 128)} |
||||
) |
||||
|
||||
// NetworksOption are options for Networks and NetworksWithin.
|
||||
type NetworksOption func(*Networks) |
||||
|
||||
// SkipAliasedNetworks is an option for Networks and NetworksWithin that
|
||||
// makes them not iterate over aliases of the IPv4 subtree in an IPv6
|
||||
// database, e.g., ::ffff:0:0/96, 2001::/32, and 2002::/16.
|
||||
//
|
||||
// You most likely want to set this. The only reason it isn't the default
|
||||
// behavior is to provide backwards compatibility to existing users.
|
||||
func SkipAliasedNetworks(networks *Networks) { |
||||
networks.skipAliasedNetworks = true |
||||
} |
||||
|
||||
// Networks returns an iterator that can be used to traverse all networks in
|
||||
// the database.
|
||||
//
|
||||
// Please note that a MaxMind DB may map IPv4 networks into several locations
|
||||
// in an IPv6 database. This iterator will iterate over all of these locations
|
||||
// separately. To only iterate over the IPv4 networks once, use the
|
||||
// SkipAliasedNetworks option.
|
||||
func (r *Reader) Networks(options ...NetworksOption) *Networks { |
||||
var networks *Networks |
||||
if r.Metadata.IPVersion == 6 { |
||||
networks = r.NetworksWithin(allIPv6, options...) |
||||
} else { |
||||
networks = r.NetworksWithin(allIPv4, options...) |
||||
} |
||||
|
||||
return networks |
||||
} |
||||
|
||||
// NetworksWithin returns an iterator that can be used to traverse all networks
// in the database which are contained in a given network.
//
// Please note that a MaxMind DB may map IPv4 networks into several locations
// in an IPv6 database. This iterator will iterate over all of these locations
// separately. To only iterate over the IPv4 networks once, use the
// SkipAliasedNetworks option.
//
// If the provided network is contained within a network in the database, the
// iterator will iterate over exactly one network, the containing network.
func (r *Reader) NetworksWithin(network *net.IPNet, options ...NetworksOption) *Networks {
	if r.Metadata.IPVersion == 4 && network.IP.To4() == nil {
		return &Networks{
			err: fmt.Errorf(
				"error getting networks with '%s': you attempted to use an IPv6 network in an IPv4-only database",
				network.String(),
			),
		}
	}

	networks := &Networks{reader: r}
	for _, option := range options {
		option(networks)
	}

	ip := network.IP
	prefixLength, _ := network.Mask.Size()

	// Map a 4-byte network into the IPv6 tree. When skipping aliases,
	// place it in the ::/96 subtree rather than the ::ffff:0:0/96 form
	// that To16 would produce.
	if r.Metadata.IPVersion == 6 && len(ip) == net.IPv4len {
		if networks.skipAliasedNetworks {
			ip = net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ip[0], ip[1], ip[2], ip[3]}
		} else {
			ip = ip.To16()
		}
		prefixLength += 96
	}

	// Seed the traversal stack with the node reached at the requested
	// prefix; Next will walk the subtree below it.
	pointer, bit := r.traverseTree(ip, 0, uint(prefixLength))
	networks.nodes = []netNode{
		{
			ip:      ip,
			bit:     uint(bit),
			pointer: pointer,
		},
	}

	return networks
}
||||
|
||||
// Next prepares the next network for reading with the Network method. It
// returns true if there is another network to be processed and false if there
// are no more networks or if there is an error.
func (n *Networks) Next() bool {
	if n.err != nil {
		return false
	}
	// Depth-first traversal: pop a pending node, walk its left spine while
	// pushing right children, and stop at the first data record found.
	for len(n.nodes) > 0 {
		node := n.nodes[len(n.nodes)-1]
		n.nodes = n.nodes[:len(n.nodes)-1]

		// A pointer equal to NodeCount marks an empty record; such
		// branches are simply dropped.
		for node.pointer != n.reader.Metadata.NodeCount {
			// This skips IPv4 aliases without hardcoding the networks that the writer
			// currently aliases.
			if n.skipAliasedNetworks && n.reader.ipv4Start != 0 &&
				node.pointer == n.reader.ipv4Start && !isInIPv4Subtree(node.ip) {
				break
			}

			// Values above NodeCount are data pointers: a network
			// has been found.
			if node.pointer > n.reader.Metadata.NodeCount {
				n.lastNode = node
				return true
			}
			ipRight := make(net.IP, len(node.ip))
			copy(ipRight, node.ip)
			if len(ipRight) <= int(node.bit>>3) {
				n.err = newInvalidDatabaseError(
					"invalid search tree at %v/%v", ipRight, node.bit)
				return false
			}
			// Set bit node.bit to form the right child's prefix.
			ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))

			offset := node.pointer * n.reader.nodeOffsetMult
			rightPointer := n.reader.nodeReader.readRight(offset)

			node.bit++
			n.nodes = append(n.nodes, netNode{
				pointer: rightPointer,
				ip:      ipRight,
				bit:     node.bit,
			})

			node.pointer = n.reader.nodeReader.readLeft(offset)
		}
	}

	return false
}
||||
|
||||
// Network returns the current network or an error if there is a problem
// decoding the data for the network. It takes a pointer to a result value to
// decode the network's data into.
func (n *Networks) Network(result interface{}) (*net.IPNet, error) {
	if n.err != nil {
		return nil, n.err
	}
	if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil {
		return nil, err
	}

	ip := n.lastNode.ip
	prefixLength := int(n.lastNode.bit)

	// We do this because uses of SkipAliasedNetworks expect the IPv4 networks
	// to be returned as IPv4 networks. If we are not skipping aliased
	// networks, then the user will get IPv4 networks from the ::FFFF:0:0/96
	// network as Go automatically converts those.
	if n.skipAliasedNetworks && isInIPv4Subtree(ip) {
		// Strip the 12 leading zero bytes and the matching 96 bits of
		// prefix to produce the 4-byte form.
		ip = ip[12:]
		prefixLength -= 96
	}

	return &net.IPNet{
		IP:   ip,
		Mask: net.CIDRMask(prefixLength, len(ip)*8),
	}, nil
}
||||
|
||||
// Err returns an error, if any, that was encountered during iteration. Check
// it after Next returns false to distinguish exhaustion from failure.
func (n *Networks) Err() error {
	return n.err
}
||||
|
||||
// isInIPv4Subtree returns true if the IP is an IPv6 address in the database's
|
||||
// IPv4 subtree.
|
||||
func isInIPv4Subtree(ip net.IP) bool { |
||||
if len(ip) != 16 { |
||||
return false |
||||
} |
||||
for i := 0; i < 12; i++ { |
||||
if ip[i] != 0 { |
||||
return false |
||||
} |
||||
} |
||||
return true |
||||
} |
||||
@ -0,0 +1,201 @@ |
||||
package maxminddb |
||||
|
||||
import ( |
||||
"reflect" |
||||
"runtime" |
||||
) |
||||
|
||||
// verifier wraps a Reader to run consistency checks over its metadata,
// search tree, and data section.
type verifier struct {
	reader *Reader
}
||||
|
||||
// Verify checks that the database is valid. It validates the search tree,
// the data section, and the metadata section. This verifier is stricter than
// the specification and may return errors on databases that are readable.
func (r *Reader) Verify() error {
	v := verifier{r}
	if err := v.verifyMetadata(); err != nil {
		return err
	}

	err := v.verifyDatabase()
	// Keep the Reader (and thus its mapped buffer) from being finalized
	// while the raw bytes are still being walked.
	runtime.KeepAlive(v.reader)
	return err
}
||||
|
||||
// verifyMetadata checks the decoded metadata fields against the values this
// verifier requires: binary format 2.0, a non-empty database type and
// description, IP version 4 or 6, record size 24/28/32, and a positive node
// count. The first failing field is reported via testError.
func (v *verifier) verifyMetadata() error {
	metadata := v.reader.Metadata

	if metadata.BinaryFormatMajorVersion != 2 {
		return testError(
			"binary_format_major_version",
			2,
			metadata.BinaryFormatMajorVersion,
		)
	}

	if metadata.BinaryFormatMinorVersion != 0 {
		return testError(
			"binary_format_minor_version",
			0,
			metadata.BinaryFormatMinorVersion,
		)
	}

	if metadata.DatabaseType == "" {
		return testError(
			"database_type",
			"non-empty string",
			metadata.DatabaseType,
		)
	}

	if len(metadata.Description) == 0 {
		return testError(
			"description",
			"non-empty slice",
			metadata.Description,
		)
	}

	if metadata.IPVersion != 4 && metadata.IPVersion != 6 {
		return testError(
			"ip_version",
			"4 or 6",
			metadata.IPVersion,
		)
	}

	if metadata.RecordSize != 24 &&
		metadata.RecordSize != 28 &&
		metadata.RecordSize != 32 {
		return testError(
			"record_size",
			"24, 28, or 32",
			metadata.RecordSize,
		)
	}

	if metadata.NodeCount == 0 {
		return testError(
			"node_count",
			"positive integer",
			metadata.NodeCount,
		)
	}
	return nil
}
||||
|
||||
func (v *verifier) verifyDatabase() error { |
||||
offsets, err := v.verifySearchTree() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
if err := v.verifyDataSectionSeparator(); err != nil { |
||||
return err |
||||
} |
||||
|
||||
return v.verifyDataSection(offsets) |
||||
} |
||||
|
||||
// verifySearchTree walks every network in the database, checking that each
// leaf's data pointer resolves, and returns the set of data-section offsets
// that the search tree references (used later to cross-check the data
// section).
func (v *verifier) verifySearchTree() (map[uint]bool, error) {
	offsets := make(map[uint]bool)

	it := v.reader.Networks()
	for it.Next() {
		offset, err := v.reader.resolveDataPointer(it.lastNode.pointer)
		if err != nil {
			return nil, err
		}
		offsets[uint(offset)] = true
	}
	if err := it.Err(); err != nil {
		return nil, err
	}
	return offsets, nil
}
||||
|
||||
// verifyDataSectionSeparator checks that the 16 bytes between the end of the
// search tree and the start of the data section are all zero.
func (v *verifier) verifyDataSectionSeparator() error {
	// The search tree occupies NodeCount nodes of RecordSize/4 bytes each.
	separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4

	separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize]

	for _, b := range separator {
		if b != 0 {
			return newInvalidDatabaseError("unexpected byte in data separator: %v", separator)
		}
	}
	return nil
}
||||
|
||||
// verifyDataSection decodes every value in the data section sequentially and
// cross-checks the set of value offsets against the offsets referenced by
// the search tree: every stored value must be pointed to by the tree, and
// every tree pointer must correspond to a stored value.
func (v *verifier) verifyDataSection(offsets map[uint]bool) error {
	pointerCount := len(offsets)

	decoder := v.reader.decoder

	var offset uint
	bufferLen := uint(len(decoder.buffer))
	for offset < bufferLen {
		var data interface{}
		rv := reflect.ValueOf(&data)
		newOffset, err := decoder.decode(offset, rv, 0)
		if err != nil {
			return newInvalidDatabaseError(
				"received decoding error (%v) at offset of %v",
				err,
				offset,
			)
		}
		// A non-advancing decode would loop forever; treat it as
		// corruption.
		if newOffset <= offset {
			return newInvalidDatabaseError(
				"data section offset unexpectedly went from %v to %v",
				offset,
				newOffset,
			)
		}

		pointer := offset

		if _, ok := offsets[pointer]; !ok {
			return newInvalidDatabaseError(
				"found data (%v) at %v that the search tree does not point to",
				data,
				pointer,
			)
		}
		// Consume the matched pointer so leftovers can be reported below.
		delete(offsets, pointer)

		offset = newOffset
	}

	if offset != bufferLen {
		return newInvalidDatabaseError(
			"unexpected data at the end of the data section (last offset: %v, end: %v)",
			offset,
			bufferLen,
		)
	}

	if len(offsets) != 0 {
		return newInvalidDatabaseError(
			"found %v pointers (of %v) in the search tree that we did not see in the data section",
			len(offsets),
			pointerCount,
		)
	}
	return nil
}
||||
|
||||
func testError( |
||||
field string, |
||||
expected interface{}, |
||||
actual interface{}, |
||||
) error { |
||||
return newInvalidDatabaseError( |
||||
"%v - Expected: %v Actual: %v", |
||||
field, |
||||
expected, |
||||
actual, |
||||
) |
||||
} |
||||
Loading…
Reference in new issue