Mirror of https://github.com/SagerNet/sing-box.git (synced 2024-11-24 01:21:30 +00:00)

Commit bb70e8267e — Add rule-set
Parent: 411f02ee4f
.gitignore (vendored, 1 change)

@@ -1,6 +1,7 @@
 /.idea/
 /vendor/
 /*.json
+/*.srs
 /*.db
 /site/
 /bin/
@@ -1,11 +1,16 @@
 package adapter

 import (
+    "bytes"
     "context"
+    "encoding/binary"
+    "io"
     "net"
+    "time"

     "github.com/sagernet/sing-box/common/urltest"
     N "github.com/sagernet/sing/common/network"
+    "github.com/sagernet/sing/common/rw"
 )

 type ClashServer interface {
@@ -23,6 +28,7 @@ type CacheFile interface {
     PreStarter

     StoreFakeIP() bool
+    FakeIPStorage

     LoadMode() string
     StoreMode(mode string) error
@@ -30,7 +36,65 @@ type CacheFile interface {
     StoreSelected(group string, selected string) error
     LoadGroupExpand(group string) (isExpand bool, loaded bool)
     StoreGroupExpand(group string, expand bool) error
-    FakeIPStorage
+    LoadRuleSet(tag string) *SavedRuleSet
+    SaveRuleSet(tag string, set *SavedRuleSet) error
+}
+
+type SavedRuleSet struct {
+    Content     []byte
+    LastUpdated time.Time
+    LastEtag    string
+}
+
+func (s *SavedRuleSet) MarshalBinary() ([]byte, error) {
+    var buffer bytes.Buffer
+    err := binary.Write(&buffer, binary.BigEndian, uint8(1))
+    if err != nil {
+        return nil, err
+    }
+    err = rw.WriteUVariant(&buffer, uint64(len(s.Content)))
+    if err != nil {
+        return nil, err
+    }
+    buffer.Write(s.Content)
+    err = binary.Write(&buffer, binary.BigEndian, s.LastUpdated.Unix())
+    if err != nil {
+        return nil, err
+    }
+    err = rw.WriteVString(&buffer, s.LastEtag)
+    if err != nil {
+        return nil, err
+    }
+    return buffer.Bytes(), nil
+}
+
+func (s *SavedRuleSet) UnmarshalBinary(data []byte) error {
+    reader := bytes.NewReader(data)
+    var version uint8
+    err := binary.Read(reader, binary.BigEndian, &version)
+    if err != nil {
+        return err
+    }
+    contentLen, err := rw.ReadUVariant(reader)
+    if err != nil {
+        return err
+    }
+    s.Content = make([]byte, contentLen)
+    _, err = io.ReadFull(reader, s.Content)
+    if err != nil {
+        return err
+    }
+    var lastUpdated int64
+    err = binary.Read(reader, binary.BigEndian, &lastUpdated)
+    if err != nil {
+        return err
+    }
+    s.LastUpdated = time.Unix(lastUpdated, 0)
+    s.LastEtag, err = rw.ReadVString(reader)
+    if err != nil {
+        return err
+    }
+    return nil
 }

 type Tracker interface {
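A small round-trip sketch, not part of this commit, showing how the SavedRuleSet added above is meant to be cached and restored through its binary encoding. It assumes the adapter package shown in this diff is importable as github.com/sagernet/sing-box/adapter; the payload and etag values are invented.

package main

import (
    "fmt"
    "time"

    "github.com/sagernet/sing-box/adapter" // assumed import path for the package in this diff
)

func main() {
    saved := &adapter.SavedRuleSet{
        Content:     []byte("compiled rule-set payload"), // placeholder content
        LastUpdated: time.Now(),
        LastEtag:    `"abc123"`, // placeholder etag
    }
    // MarshalBinary writes: version byte, uvarint content length, content,
    // unix timestamp, then the etag string (per the method above).
    data, err := saved.MarshalBinary()
    if err != nil {
        panic(err)
    }
    var restored adapter.SavedRuleSet
    if err := restored.UnmarshalBinary(data); err != nil {
        panic(err)
    }
    fmt.Println(len(restored.Content), restored.LastEtag)
}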
@@ -46,11 +46,24 @@ type InboundContext struct {
     SourceGeoIPCode string
     GeoIPCode       string
     ProcessInfo     *process.Info
+    QueryType       uint16
     FakeIP          bool

-    // dns cache
+    // rule cache

-    QueryType uint16
+    IPCIDRMatchSource       bool
+    SourceAddressMatch      bool
+    SourcePortMatch         bool
+    DestinationAddressMatch bool
+    DestinationPortMatch    bool
+}
+
+func (c *InboundContext) ResetRuleCache() {
+    c.IPCIDRMatchSource = false
+    c.SourceAddressMatch = false
+    c.SourcePortMatch = false
+    c.DestinationAddressMatch = false
+    c.DestinationPortMatch = false
 }

 type inboundContextKey struct{}
@@ -2,12 +2,14 @@ package adapter

 import (
     "context"
+    "net/http"
     "net/netip"

     "github.com/sagernet/sing-box/common/geoip"
     "github.com/sagernet/sing-dns"
     "github.com/sagernet/sing-tun"
     "github.com/sagernet/sing/common/control"
+    N "github.com/sagernet/sing/common/network"
     "github.com/sagernet/sing/service"

     mdns "github.com/miekg/dns"
@@ -19,7 +21,7 @@ type Router interface {
     Outbounds() []Outbound
     Outbound(tag string) (Outbound, bool)
-    DefaultOutbound(network string) Outbound
+    DefaultOutbound(network string) (Outbound, error)

     FakeIPStore() FakeIPStore
@@ -28,6 +30,8 @@ type Router interface {
     GeoIPReader() *geoip.Reader
     LoadGeosite(code string) (Rule, error)

+    RuleSet(tag string) (RuleSet, bool)
+
     Exchange(ctx context.Context, message *mdns.Msg) (*mdns.Msg, error)
     Lookup(ctx context.Context, domain string, strategy dns.DomainStrategy) ([]netip.Addr, error)
     LookupDefault(ctx context.Context, domain string) ([]netip.Addr, error)
@@ -62,11 +66,15 @@ func RouterFromContext(ctx context.Context) Router {
     return service.FromContext[Router](ctx)
 }

+type HeadlessRule interface {
+    Match(metadata *InboundContext) bool
+}
+
 type Rule interface {
+    HeadlessRule
     Service
     Type() string
     UpdateGeosite() error
-    Match(metadata *InboundContext) bool
     Outbound() string
     String() string
 }
@@ -77,6 +85,24 @@ type DNSRule interface {
     RewriteTTL() *uint32
 }

+type RuleSet interface {
+    StartContext(ctx context.Context, startContext RuleSetStartContext) error
+    PostStart() error
+    Metadata() RuleSetMetadata
+    Close() error
+    HeadlessRule
+}
+
+type RuleSetMetadata struct {
+    ContainsProcessRule bool
+    ContainsWIFIRule    bool
+}
+
+type RuleSetStartContext interface {
+    HTTPClient(detour string, dialer N.Dialer) *http.Client
+    Close()
+}
+
 type InterfaceUpdateListener interface {
     InterfaceUpdated()
 }
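A hypothetical illustration of the interface split above, not part of this commit: any value with a Match(*InboundContext) bool method now satisfies HeadlessRule, which both full routing rules and compiled rule-sets embed. The import path is assumed from the repository layout, and fakeIPRule is an invented name; it relies only on the FakeIP field visible in the InboundContext diff.

package example

import "github.com/sagernet/sing-box/adapter" // assumed import path

// fakeIPRule is a toy rule that matches connections using a fake IP address.
type fakeIPRule struct{}

func (r fakeIPRule) Match(metadata *adapter.InboundContext) bool {
    return metadata.FakeIP
}

// Compile-time check that the toy rule satisfies the new HeadlessRule interface.
var _ adapter.HeadlessRule = fakeIPRule{}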
box.go (4 changes)

@@ -308,6 +308,10 @@ func (s *Box) postStart() error {
         }
     }
     s.logger.Trace("post-starting router")
+    err := s.router.PostStart()
+    if err != nil {
+        return E.Cause(err, "post-start router")
+    }
     return s.router.PostStart()
 }
@@ -7,7 +7,6 @@ import (
     "github.com/sagernet/sing-box/common/json"
     "github.com/sagernet/sing-box/log"
-    "github.com/sagernet/sing-box/option"
     E "github.com/sagernet/sing/common/exceptions"

     "github.com/spf13/cobra"
@@ -69,41 +68,3 @@ func format() error {
     }
     return nil
 }
-
-func formatOne(configPath string) error {
-    configContent, err := os.ReadFile(configPath)
-    if err != nil {
-        return E.Cause(err, "read config")
-    }
-    var options option.Options
-    err = options.UnmarshalJSON(configContent)
-    if err != nil {
-        return E.Cause(err, "decode config")
-    }
-    buffer := new(bytes.Buffer)
-    encoder := json.NewEncoder(buffer)
-    encoder.SetIndent("", "  ")
-    err = encoder.Encode(options)
-    if err != nil {
-        return E.Cause(err, "encode config")
-    }
-    if !commandFormatFlagWrite {
-        os.Stdout.WriteString(buffer.String() + "\n")
-        return nil
-    }
-    if bytes.Equal(configContent, buffer.Bytes()) {
-        return nil
-    }
-    output, err := os.Create(configPath)
-    if err != nil {
-        return E.Cause(err, "open output")
-    }
-    _, err = output.Write(buffer.Bytes())
-    output.Close()
-    if err != nil {
-        return E.Cause(err, "write output")
-    }
-    outputPath, _ := filepath.Abs(configPath)
-    os.Stderr.WriteString(outputPath + "\n")
-    return nil
-}
cmd/sing-box/cmd_geoip.go (new file, 43 lines)

package main

import (
    "github.com/sagernet/sing-box/log"
    E "github.com/sagernet/sing/common/exceptions"

    "github.com/oschwald/maxminddb-golang"
    "github.com/spf13/cobra"
)

var (
    geoipReader          *maxminddb.Reader
    commandGeoIPFlagFile string
)

var commandGeoip = &cobra.Command{
    Use:   "geoip",
    Short: "GeoIP tools",
    PersistentPreRun: func(cmd *cobra.Command, args []string) {
        err := geoipPreRun()
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandGeoip.PersistentFlags().StringVarP(&commandGeoIPFlagFile, "file", "f", "geoip.db", "geoip file")
    mainCommand.AddCommand(commandGeoip)
}

func geoipPreRun() error {
    reader, err := maxminddb.Open(commandGeoIPFlagFile)
    if err != nil {
        return err
    }
    if reader.Metadata.DatabaseType != "sing-geoip" {
        reader.Close()
        return E.New("incorrect database type, expected sing-geoip, got ", reader.Metadata.DatabaseType)
    }
    geoipReader = reader
    return nil
}
cmd/sing-box/cmd_geoip_export.go (new file, 98 lines)

package main

import (
    "io"
    "net"
    "os"
    "strings"

    "github.com/sagernet/sing-box/common/json"
    C "github.com/sagernet/sing-box/constant"
    "github.com/sagernet/sing-box/log"
    "github.com/sagernet/sing-box/option"
    E "github.com/sagernet/sing/common/exceptions"

    "github.com/oschwald/maxminddb-golang"
    "github.com/spf13/cobra"
)

var flagGeoipExportOutput string

const flagGeoipExportDefaultOutput = "geoip-<country>.srs"

var commandGeoipExport = &cobra.Command{
    Use:   "export <country>",
    Short: "Export geoip country as rule-set",
    Args:  cobra.ExactArgs(1),
    Run: func(cmd *cobra.Command, args []string) {
        err := geoipExport(args[0])
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandGeoipExport.Flags().StringVarP(&flagGeoipExportOutput, "output", "o", flagGeoipExportDefaultOutput, "Output path")
    commandGeoip.AddCommand(commandGeoipExport)
}

func geoipExport(countryCode string) error {
    networks := geoipReader.Networks(maxminddb.SkipAliasedNetworks)
    countryMap := make(map[string][]*net.IPNet)
    var (
        ipNet           *net.IPNet
        nextCountryCode string
        err             error
    )
    for networks.Next() {
        ipNet, err = networks.Network(&nextCountryCode)
        if err != nil {
            return err
        }
        countryMap[nextCountryCode] = append(countryMap[nextCountryCode], ipNet)
    }
    ipNets := countryMap[strings.ToLower(countryCode)]
    if len(ipNets) == 0 {
        return E.New("country code not found: ", countryCode)
    }

    var (
        outputFile   *os.File
        outputWriter io.Writer
    )
    if flagGeoipExportOutput == "stdout" {
        outputWriter = os.Stdout
    } else if flagGeoipExportOutput == flagGeoipExportDefaultOutput {
        outputFile, err = os.Create("geoip-" + countryCode + ".json")
        if err != nil {
            return err
        }
        defer outputFile.Close()
        outputWriter = outputFile
    } else {
        outputFile, err = os.Create(flagGeoipExportOutput)
        if err != nil {
            return err
        }
        defer outputFile.Close()
        outputWriter = outputFile
    }

    encoder := json.NewEncoder(outputWriter)
    encoder.SetIndent("", "  ")
    var headlessRule option.DefaultHeadlessRule
    headlessRule.IPCIDR = make([]string, 0, len(ipNets))
    for _, cidr := range ipNets {
        headlessRule.IPCIDR = append(headlessRule.IPCIDR, cidr.String())
    }
    var plainRuleSet option.PlainRuleSetCompat
    plainRuleSet.Version = C.RuleSetVersion1
    plainRuleSet.Options.Rules = []option.HeadlessRule{
        {
            Type:           C.RuleTypeDefault,
            DefaultOptions: headlessRule,
        },
    }
    return encoder.Encode(plainRuleSet)
}
cmd/sing-box/cmd_geoip_list.go (new file, 31 lines)

package main

import (
    "os"

    "github.com/sagernet/sing-box/log"

    "github.com/spf13/cobra"
)

var commandGeoipList = &cobra.Command{
    Use:   "list",
    Short: "List geoip country codes",
    Run: func(cmd *cobra.Command, args []string) {
        err := listGeoip()
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandGeoip.AddCommand(commandGeoipList)
}

func listGeoip() error {
    for _, code := range geoipReader.Metadata.Languages {
        os.Stdout.WriteString(code + "\n")
    }
    return nil
}
cmd/sing-box/cmd_geoip_lookup.go (new file, 47 lines)

package main

import (
    "net/netip"
    "os"

    "github.com/sagernet/sing-box/log"
    E "github.com/sagernet/sing/common/exceptions"
    N "github.com/sagernet/sing/common/network"

    "github.com/spf13/cobra"
)

var commandGeoipLookup = &cobra.Command{
    Use:   "lookup <address>",
    Short: "Lookup if an IP address is contained in the GeoIP database",
    Args:  cobra.ExactArgs(1),
    Run: func(cmd *cobra.Command, args []string) {
        err := geoipLookup(args[0])
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandGeoip.AddCommand(commandGeoipLookup)
}

func geoipLookup(address string) error {
    addr, err := netip.ParseAddr(address)
    if err != nil {
        return E.Cause(err, "parse address")
    }
    if !N.IsPublicAddr(addr) {
        os.Stdout.WriteString("private\n")
        return nil
    }
    var code string
    _ = geoipReader.Lookup(addr.AsSlice(), &code)
    if code != "" {
        os.Stdout.WriteString(code + "\n")
        return nil
    }
    os.Stdout.WriteString("unknown\n")
    return nil
}
cmd/sing-box/cmd_geosite.go (new file, 41 lines)

package main

import (
    "github.com/sagernet/sing-box/common/geosite"
    "github.com/sagernet/sing-box/log"
    E "github.com/sagernet/sing/common/exceptions"

    "github.com/spf13/cobra"
)

var (
    commandGeoSiteFlagFile string
    geositeReader          *geosite.Reader
    geositeCodeList        []string
)

var commandGeoSite = &cobra.Command{
    Use:   "geosite",
    Short: "Geosite tools",
    PersistentPreRun: func(cmd *cobra.Command, args []string) {
        err := geositePreRun()
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandGeoSite.PersistentFlags().StringVarP(&commandGeoSiteFlagFile, "file", "f", "geosite.db", "geosite file")
    mainCommand.AddCommand(commandGeoSite)
}

func geositePreRun() error {
    reader, codeList, err := geosite.Open(commandGeoSiteFlagFile)
    if err != nil {
        return E.Cause(err, "open geosite file")
    }
    geositeReader = reader
    geositeCodeList = codeList
    return nil
}
cmd/sing-box/cmd_geosite_export.go (new file, 81 lines)

package main

import (
    "encoding/json"
    "io"
    "os"

    "github.com/sagernet/sing-box/common/geosite"
    C "github.com/sagernet/sing-box/constant"
    "github.com/sagernet/sing-box/log"
    "github.com/sagernet/sing-box/option"

    "github.com/spf13/cobra"
)

var commandGeositeExportOutput string

const commandGeositeExportDefaultOutput = "geosite-<category>.json"

var commandGeositeExport = &cobra.Command{
    Use:   "export <category>",
    Short: "Export geosite category as rule-set",
    Args:  cobra.ExactArgs(1),
    Run: func(cmd *cobra.Command, args []string) {
        err := geositeExport(args[0])
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandGeositeExport.Flags().StringVarP(&commandGeositeExportOutput, "output", "o", commandGeositeExportDefaultOutput, "Output path")
    commandGeoSite.AddCommand(commandGeositeExport)
}

func geositeExport(category string) error {
    sourceSet, err := geositeReader.Read(category)
    if err != nil {
        return err
    }
    var (
        outputFile   *os.File
        outputWriter io.Writer
    )
    if commandGeositeExportOutput == "stdout" {
        outputWriter = os.Stdout
    } else if commandGeositeExportOutput == commandGeositeExportDefaultOutput {
        outputFile, err = os.Create("geosite-" + category + ".json")
        if err != nil {
            return err
        }
        defer outputFile.Close()
        outputWriter = outputFile
    } else {
        outputFile, err = os.Create(commandGeositeExportOutput)
        if err != nil {
            return err
        }
        defer outputFile.Close()
        outputWriter = outputFile
    }

    encoder := json.NewEncoder(outputWriter)
    encoder.SetIndent("", "  ")
    var headlessRule option.DefaultHeadlessRule
    defaultRule := geosite.Compile(sourceSet)
    headlessRule.Domain = defaultRule.Domain
    headlessRule.DomainSuffix = defaultRule.DomainSuffix
    headlessRule.DomainKeyword = defaultRule.DomainKeyword
    headlessRule.DomainRegex = defaultRule.DomainRegex
    var plainRuleSet option.PlainRuleSetCompat
    plainRuleSet.Version = C.RuleSetVersion1
    plainRuleSet.Options.Rules = []option.HeadlessRule{
        {
            Type:           C.RuleTypeDefault,
            DefaultOptions: headlessRule,
        },
    }
    return encoder.Encode(plainRuleSet)
}
cmd/sing-box/cmd_geosite_list.go (new file, 50 lines)

package main

import (
    "os"
    "sort"

    "github.com/sagernet/sing-box/log"
    F "github.com/sagernet/sing/common/format"

    "github.com/spf13/cobra"
)

var commandGeositeList = &cobra.Command{
    Use:   "list <category>",
    Short: "List geosite categories",
    Run: func(cmd *cobra.Command, args []string) {
        err := geositeList()
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandGeoSite.AddCommand(commandGeositeList)
}

func geositeList() error {
    var geositeEntry []struct {
        category string
        items    int
    }
    for _, category := range geositeCodeList {
        sourceSet, err := geositeReader.Read(category)
        if err != nil {
            return err
        }
        geositeEntry = append(geositeEntry, struct {
            category string
            items    int
        }{category, len(sourceSet)})
    }
    sort.SliceStable(geositeEntry, func(i, j int) bool {
        return geositeEntry[i].items < geositeEntry[j].items
    })
    for _, entry := range geositeEntry {
        os.Stdout.WriteString(F.ToString(entry.category, " (", entry.items, ")\n"))
    }
    return nil
}
cmd/sing-box/cmd_geosite_lookup.go (new file, 97 lines)

package main

import (
    "os"
    "sort"

    "github.com/sagernet/sing-box/log"
    E "github.com/sagernet/sing/common/exceptions"

    "github.com/spf13/cobra"
)

var commandGeositeLookup = &cobra.Command{
    Use:   "lookup [category] <domain>",
    Short: "Check if a domain is in the geosite",
    Args:  cobra.RangeArgs(1, 2),
    Run: func(cmd *cobra.Command, args []string) {
        var (
            source string
            target string
        )
        switch len(args) {
        case 1:
            target = args[0]
        case 2:
            source = args[0]
            target = args[1]
        }
        err := geositeLookup(source, target)
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandGeoSite.AddCommand(commandGeositeLookup)
}

func geositeLookup(source string, target string) error {
    var sourceMatcherList []struct {
        code    string
        matcher *searchGeositeMatcher
    }
    if source != "" {
        sourceSet, err := geositeReader.Read(source)
        if err != nil {
            return err
        }
        sourceMatcher, err := newSearchGeositeMatcher(sourceSet)
        if err != nil {
            return E.Cause(err, "compile code: "+source)
        }
        sourceMatcherList = []struct {
            code    string
            matcher *searchGeositeMatcher
        }{
            {
                code:    source,
                matcher: sourceMatcher,
            },
        }
    } else {
        for _, code := range geositeCodeList {
            sourceSet, err := geositeReader.Read(code)
            if err != nil {
                return err
            }
            sourceMatcher, err := newSearchGeositeMatcher(sourceSet)
            if err != nil {
                return E.Cause(err, "compile code: "+code)
            }
            sourceMatcherList = append(sourceMatcherList, struct {
                code    string
                matcher *searchGeositeMatcher
            }{
                code:    code,
                matcher: sourceMatcher,
            })
        }
    }
    sort.SliceStable(sourceMatcherList, func(i, j int) bool {
        return sourceMatcherList[i].code < sourceMatcherList[j].code
    })

    for _, matcherItem := range sourceMatcherList {
        if matchRule := matcherItem.matcher.Match(target); matchRule != "" {
            os.Stdout.WriteString("Match code (")
            os.Stdout.WriteString(matcherItem.code)
            os.Stdout.WriteString(") ")
            os.Stdout.WriteString(matchRule)
            os.Stdout.WriteString("\n")
        }
    }
    return nil
}
cmd/sing-box/cmd_geosite_matcher.go (new file, 56 lines)

package main

import (
    "regexp"
    "strings"

    "github.com/sagernet/sing-box/common/geosite"
)

type searchGeositeMatcher struct {
    domainMap   map[string]bool
    suffixList  []string
    keywordList []string
    regexList   []string
}

func newSearchGeositeMatcher(items []geosite.Item) (*searchGeositeMatcher, error) {
    options := geosite.Compile(items)
    domainMap := make(map[string]bool)
    for _, domain := range options.Domain {
        domainMap[domain] = true
    }
    rule := &searchGeositeMatcher{
        domainMap:   domainMap,
        suffixList:  options.DomainSuffix,
        keywordList: options.DomainKeyword,
        regexList:   options.DomainRegex,
    }
    return rule, nil
}

func (r *searchGeositeMatcher) Match(domain string) string {
    if r.domainMap[domain] {
        return "domain=" + domain
    }
    for _, suffix := range r.suffixList {
        if strings.HasSuffix(domain, suffix) {
            return "domain_suffix=" + suffix
        }
    }
    for _, keyword := range r.keywordList {
        if strings.Contains(domain, keyword) {
            return "domain_keyword=" + keyword
        }
    }
    for _, regexStr := range r.regexList {
        regex, err := regexp.Compile(regexStr)
        if err != nil {
            continue
        }
        if regex.MatchString(domain) {
            return "domain_regex=" + regexStr
        }
    }
    return ""
}
@@ -18,7 +18,7 @@ import (
 )

 var commandMerge = &cobra.Command{
-    Use:   "merge [output]",
+    Use:   "merge <output>",
     Short: "Merge configurations",
     Run: func(cmd *cobra.Command, args []string) {
         err := merge(args[0])
cmd/sing-box/cmd_rule_set.go (new file, 14 lines)

package main

import (
    "github.com/spf13/cobra"
)

var commandRuleSet = &cobra.Command{
    Use:   "rule-set",
    Short: "Manage rule sets",
}

func init() {
    mainCommand.AddCommand(commandRuleSet)
}
cmd/sing-box/cmd_rule_set_compile.go (new file, 80 lines)

package main

import (
    "io"
    "os"
    "strings"

    "github.com/sagernet/sing-box/common/json"
    "github.com/sagernet/sing-box/common/srs"
    "github.com/sagernet/sing-box/log"
    "github.com/sagernet/sing-box/option"

    "github.com/spf13/cobra"
)

var flagRuleSetCompileOutput string

const flagRuleSetCompileDefaultOutput = "<file_name>.srs"

var commandRuleSetCompile = &cobra.Command{
    Use:   "compile [source-path]",
    Short: "Compile rule-set json to binary",
    Args:  cobra.ExactArgs(1),
    Run: func(cmd *cobra.Command, args []string) {
        err := compileRuleSet(args[0])
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandRuleSet.AddCommand(commandRuleSetCompile)
    commandRuleSetCompile.Flags().StringVarP(&flagRuleSetCompileOutput, "output", "o", flagRuleSetCompileDefaultOutput, "Output file")
}

func compileRuleSet(sourcePath string) error {
    var (
        reader io.Reader
        err    error
    )
    if sourcePath == "stdin" {
        reader = os.Stdin
    } else {
        reader, err = os.Open(sourcePath)
        if err != nil {
            return err
        }
    }
    decoder := json.NewDecoder(json.NewCommentFilter(reader))
    decoder.DisallowUnknownFields()
    var plainRuleSet option.PlainRuleSetCompat
    err = decoder.Decode(&plainRuleSet)
    if err != nil {
        return err
    }
    ruleSet := plainRuleSet.Upgrade()
    var outputPath string
    if flagRuleSetCompileOutput == flagRuleSetCompileDefaultOutput {
        if strings.HasSuffix(sourcePath, ".json") {
            outputPath = sourcePath[:len(sourcePath)-5] + ".srs"
        } else {
            outputPath = sourcePath + ".srs"
        }
    } else {
        outputPath = flagRuleSetCompileOutput
    }
    outputFile, err := os.Create(outputPath)
    if err != nil {
        return err
    }
    err = srs.Write(outputFile, ruleSet)
    if err != nil {
        outputFile.Close()
        os.Remove(outputPath)
        return err
    }
    outputFile.Close()
    return nil
}
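A sketch, not part of this commit, of what the compile command does internally: build the source form of a rule-set in code, upgrade it to the plain representation, and emit the .srs binary with srs.Write. Only types and constants introduced in this diff are used; the file name example.srs and the domain suffix are invented, and it is assumed here that Upgrade() returns the option.PlainRuleSet value that srs.Write accepts, as the command's own code implies.

package main

import (
    "os"

    "github.com/sagernet/sing-box/common/srs"
    C "github.com/sagernet/sing-box/constant"
    "github.com/sagernet/sing-box/option"
)

func main() {
    // Source form: version + headless rules, the same shape the JSON decoder produces.
    var source option.PlainRuleSetCompat
    source.Version = C.RuleSetVersion1
    source.Options.Rules = []option.HeadlessRule{
        {
            Type: C.RuleTypeDefault,
            DefaultOptions: option.DefaultHeadlessRule{
                DomainSuffix: []string{".example.org"}, // invented example value
            },
        },
    }
    output, err := os.Create("example.srs")
    if err != nil {
        panic(err)
    }
    defer output.Close()
    // Write the binary rule-set: magic bytes, version, then zlib-compressed rules.
    if err := srs.Write(output, source.Upgrade()); err != nil {
        panic(err)
    }
}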
cmd/sing-box/cmd_rule_set_format.go (new file, 87 lines)

package main

import (
    "bytes"
    "io"
    "os"
    "path/filepath"

    "github.com/sagernet/sing-box/common/json"
    "github.com/sagernet/sing-box/log"
    "github.com/sagernet/sing-box/option"
    E "github.com/sagernet/sing/common/exceptions"

    "github.com/spf13/cobra"
)

var commandRuleSetFormatFlagWrite bool

var commandRuleSetFormat = &cobra.Command{
    Use:   "format <source-path>",
    Short: "Format rule-set json",
    Args:  cobra.ExactArgs(1),
    Run: func(cmd *cobra.Command, args []string) {
        err := formatRuleSet(args[0])
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandRuleSetFormat.Flags().BoolVarP(&commandRuleSetFormatFlagWrite, "write", "w", false, "write result to (source) file instead of stdout")
    commandRuleSet.AddCommand(commandRuleSetFormat)
}

func formatRuleSet(sourcePath string) error {
    var (
        reader io.Reader
        err    error
    )
    if sourcePath == "stdin" {
        reader = os.Stdin
    } else {
        reader, err = os.Open(sourcePath)
        if err != nil {
            return err
        }
    }
    content, err := io.ReadAll(reader)
    if err != nil {
        return err
    }
    decoder := json.NewDecoder(json.NewCommentFilter(bytes.NewReader(content)))
    decoder.DisallowUnknownFields()
    var plainRuleSet option.PlainRuleSetCompat
    err = decoder.Decode(&plainRuleSet)
    if err != nil {
        return err
    }
    ruleSet := plainRuleSet.Upgrade()
    buffer := new(bytes.Buffer)
    encoder := json.NewEncoder(buffer)
    encoder.SetIndent("", "  ")
    err = encoder.Encode(ruleSet)
    if err != nil {
        return E.Cause(err, "encode config")
    }
    outputPath, _ := filepath.Abs(sourcePath)
    if !commandRuleSetFormatFlagWrite || sourcePath == "stdin" {
        os.Stdout.WriteString(buffer.String() + "\n")
        return nil
    }
    if bytes.Equal(content, buffer.Bytes()) {
        return nil
    }
    output, err := os.Create(sourcePath)
    if err != nil {
        return E.Cause(err, "open output")
    }
    _, err = output.Write(buffer.Bytes())
    output.Close()
    if err != nil {
        return E.Cause(err, "write output")
    }
    os.Stderr.WriteString(outputPath + "\n")
    return nil
}
@@ -38,11 +38,7 @@ func createPreStartedClient() (*box.Box, error) {

 func createDialer(instance *box.Box, network string, outboundTag string) (N.Dialer, error) {
     if outboundTag == "" {
-        outbound := instance.Router().DefaultOutbound(N.NetworkName(network))
-        if outbound == nil {
-            return nil, E.New("missing default outbound")
-        }
-        return outbound, nil
+        return instance.Router().DefaultOutbound(N.NetworkName(network))
     } else {
         outbound, loaded := instance.Router().Outbound(outboundTag)
         if !loaded {
@@ -18,7 +18,7 @@ import (
 var commandConnectFlagNetwork string

 var commandConnect = &cobra.Command{
-    Use:   "connect [address]",
+    Use:   "connect <address>",
     Short: "Connect to an address",
     Args:  cobra.ExactArgs(1),
     Run: func(cmd *cobra.Command, args []string) {
@@ -18,11 +18,19 @@ func NewRouter(router adapter.Router) N.Dialer {
 }

 func (d *RouterDialer) DialContext(ctx context.Context, network string, destination M.Socksaddr) (net.Conn, error) {
-    return d.router.DefaultOutbound(network).DialContext(ctx, network, destination)
+    dialer, err := d.router.DefaultOutbound(network)
+    if err != nil {
+        return nil, err
+    }
+    return dialer.DialContext(ctx, network, destination)
 }

 func (d *RouterDialer) ListenPacket(ctx context.Context, destination M.Socksaddr) (net.PacketConn, error) {
-    return d.router.DefaultOutbound(N.NetworkUDP).ListenPacket(ctx, destination)
+    dialer, err := d.router.DefaultOutbound(N.NetworkUDP)
+    if err != nil {
+        return nil, err
+    }
+    return dialer.ListenPacket(ctx, destination)
 }

 func (d *RouterDialer) Upstream() any {
common/srs/binary.go (new file, 487 lines)

package srs

import (
    "compress/zlib"
    "encoding/binary"
    "io"
    "net/netip"

    C "github.com/sagernet/sing-box/constant"
    "github.com/sagernet/sing-box/option"
    "github.com/sagernet/sing/common"
    "github.com/sagernet/sing/common/domain"
    E "github.com/sagernet/sing/common/exceptions"
    "github.com/sagernet/sing/common/rw"

    "go4.org/netipx"
)

var MagicBytes = [3]byte{0x53, 0x52, 0x53} // SRS

const (
    ruleItemQueryType uint8 = iota
    ruleItemNetwork
    ruleItemDomain
    ruleItemDomainKeyword
    ruleItemDomainRegex
    ruleItemSourceIPCIDR
    ruleItemIPCIDR
    ruleItemSourcePort
    ruleItemSourcePortRange
    ruleItemPort
    ruleItemPortRange
    ruleItemProcessName
    ruleItemProcessPath
    ruleItemPackageName
    ruleItemWIFISSID
    ruleItemWIFIBSSID
    ruleItemFinal uint8 = 0xFF
)

func Read(reader io.Reader, recovery bool) (ruleSet option.PlainRuleSet, err error) {
    var magicBytes [3]byte
    _, err = io.ReadFull(reader, magicBytes[:])
    if err != nil {
        return
    }
    if magicBytes != MagicBytes {
        err = E.New("invalid sing-box rule set file")
        return
    }
    var version uint8
    err = binary.Read(reader, binary.BigEndian, &version)
    if err != nil {
        return ruleSet, err
    }
    if version != 1 {
        return ruleSet, E.New("unsupported version: ", version)
    }
    zReader, err := zlib.NewReader(reader)
    if err != nil {
        return
    }
    length, err := rw.ReadUVariant(zReader)
    if err != nil {
        return
    }
    ruleSet.Rules = make([]option.HeadlessRule, length)
    for i := uint64(0); i < length; i++ {
        ruleSet.Rules[i], err = readRule(zReader, recovery)
        if err != nil {
            err = E.Cause(err, "read rule[", i, "]")
            return
        }
    }
    return
}

func Write(writer io.Writer, ruleSet option.PlainRuleSet) error {
    _, err := writer.Write(MagicBytes[:])
    if err != nil {
        return err
    }
    err = binary.Write(writer, binary.BigEndian, uint8(1))
    if err != nil {
        return err
    }
    zWriter, err := zlib.NewWriterLevel(writer, zlib.BestCompression)
    if err != nil {
        return err
    }
    err = rw.WriteUVariant(zWriter, uint64(len(ruleSet.Rules)))
    if err != nil {
        return err
    }
    for _, rule := range ruleSet.Rules {
        err = writeRule(zWriter, rule)
        if err != nil {
            return err
        }
    }
    return zWriter.Close()
}

func readRule(reader io.Reader, recovery bool) (rule option.HeadlessRule, err error) {
    var ruleType uint8
    err = binary.Read(reader, binary.BigEndian, &ruleType)
    if err != nil {
        return
    }
    switch ruleType {
    case 0:
        rule.Type = C.RuleTypeDefault
        rule.DefaultOptions, err = readDefaultRule(reader, recovery)
    case 1:
        rule.Type = C.RuleTypeLogical
        rule.LogicalOptions, err = readLogicalRule(reader, recovery)
    default:
        err = E.New("unknown rule type: ", ruleType)
    }
    return
}

func writeRule(writer io.Writer, rule option.HeadlessRule) error {
    switch rule.Type {
    case C.RuleTypeDefault:
        return writeDefaultRule(writer, rule.DefaultOptions)
    case C.RuleTypeLogical:
        return writeLogicalRule(writer, rule.LogicalOptions)
    default:
        panic("unknown rule type: " + rule.Type)
    }
}

func readDefaultRule(reader io.Reader, recovery bool) (rule option.DefaultHeadlessRule, err error) {
    var lastItemType uint8
    for {
        var itemType uint8
        err = binary.Read(reader, binary.BigEndian, &itemType)
        if err != nil {
            return
        }
        switch itemType {
        case ruleItemQueryType:
            var rawQueryType []uint16
            rawQueryType, err = readRuleItemUint16(reader)
            if err != nil {
                return
            }
            rule.QueryType = common.Map(rawQueryType, func(it uint16) option.DNSQueryType {
                return option.DNSQueryType(it)
            })
        case ruleItemNetwork:
            rule.Network, err = readRuleItemString(reader)
        case ruleItemDomain:
            var matcher *domain.Matcher
            matcher, err = domain.ReadMatcher(reader)
            if err != nil {
                return
            }
            rule.DomainMatcher = matcher
        case ruleItemDomainKeyword:
            rule.DomainKeyword, err = readRuleItemString(reader)
        case ruleItemDomainRegex:
            rule.DomainRegex, err = readRuleItemString(reader)
        case ruleItemSourceIPCIDR:
            rule.SourceIPSet, err = readIPSet(reader)
            if err != nil {
                return
            }
            if recovery {
                rule.SourceIPCIDR = common.Map(rule.SourceIPSet.Prefixes(), netip.Prefix.String)
            }
        case ruleItemIPCIDR:
            rule.IPSet, err = readIPSet(reader)
            if err != nil {
                return
            }
            if recovery {
                rule.IPCIDR = common.Map(rule.IPSet.Prefixes(), netip.Prefix.String)
            }
        case ruleItemSourcePort:
            rule.SourcePort, err = readRuleItemUint16(reader)
        case ruleItemSourcePortRange:
            rule.SourcePortRange, err = readRuleItemString(reader)
        case ruleItemPort:
            rule.Port, err = readRuleItemUint16(reader)
        case ruleItemPortRange:
            rule.PortRange, err = readRuleItemString(reader)
        case ruleItemProcessName:
            rule.ProcessName, err = readRuleItemString(reader)
        case ruleItemProcessPath:
            rule.ProcessPath, err = readRuleItemString(reader)
        case ruleItemPackageName:
            rule.PackageName, err = readRuleItemString(reader)
        case ruleItemWIFISSID:
            rule.WIFISSID, err = readRuleItemString(reader)
        case ruleItemWIFIBSSID:
            rule.WIFIBSSID, err = readRuleItemString(reader)
        case ruleItemFinal:
            err = binary.Read(reader, binary.BigEndian, &rule.Invert)
            return
        default:
            err = E.New("unknown rule item type: ", itemType, ", last type: ", lastItemType)
        }
        if err != nil {
            return
        }
        lastItemType = itemType
    }
}

func writeDefaultRule(writer io.Writer, rule option.DefaultHeadlessRule) error {
    err := binary.Write(writer, binary.BigEndian, uint8(0))
    if err != nil {
        return err
    }
    if len(rule.QueryType) > 0 {
        err = writeRuleItemUint16(writer, ruleItemQueryType, common.Map(rule.QueryType, func(it option.DNSQueryType) uint16 {
            return uint16(it)
        }))
        if err != nil {
            return err
        }
    }
    if len(rule.Network) > 0 {
        err = writeRuleItemString(writer, ruleItemNetwork, rule.Network)
        if err != nil {
            return err
        }
    }
    if len(rule.Domain) > 0 || len(rule.DomainSuffix) > 0 {
        err = binary.Write(writer, binary.BigEndian, ruleItemDomain)
        if err != nil {
            return err
        }
        err = domain.NewMatcher(rule.Domain, rule.DomainSuffix).Write(writer)
        if err != nil {
            return err
        }
    }
    if len(rule.DomainKeyword) > 0 {
        err = writeRuleItemString(writer, ruleItemDomainKeyword, rule.DomainKeyword)
        if err != nil {
            return err
        }
    }
    if len(rule.DomainRegex) > 0 {
        err = writeRuleItemString(writer, ruleItemDomainRegex, rule.DomainRegex)
        if err != nil {
            return err
        }
    }
    if len(rule.SourceIPCIDR) > 0 {
        err = writeRuleItemCIDR(writer, ruleItemSourceIPCIDR, rule.SourceIPCIDR)
        if err != nil {
            return E.Cause(err, "source_ipcidr")
        }
    }
    if len(rule.IPCIDR) > 0 {
        err = writeRuleItemCIDR(writer, ruleItemIPCIDR, rule.IPCIDR)
        if err != nil {
            return E.Cause(err, "ipcidr")
        }
    }
    if len(rule.SourcePort) > 0 {
        err = writeRuleItemUint16(writer, ruleItemSourcePort, rule.SourcePort)
        if err != nil {
            return err
        }
    }
    if len(rule.SourcePortRange) > 0 {
        err = writeRuleItemString(writer, ruleItemSourcePortRange, rule.SourcePortRange)
        if err != nil {
            return err
        }
    }
    if len(rule.Port) > 0 {
        err = writeRuleItemUint16(writer, ruleItemPort, rule.Port)
        if err != nil {
            return err
        }
    }
    if len(rule.PortRange) > 0 {
        err = writeRuleItemString(writer, ruleItemPortRange, rule.PortRange)
        if err != nil {
            return err
        }
    }
    if len(rule.ProcessName) > 0 {
        err = writeRuleItemString(writer, ruleItemProcessName, rule.ProcessName)
        if err != nil {
            return err
        }
    }
    if len(rule.ProcessPath) > 0 {
        err = writeRuleItemString(writer, ruleItemProcessPath, rule.ProcessPath)
        if err != nil {
            return err
        }
    }
    if len(rule.PackageName) > 0 {
        err = writeRuleItemString(writer, ruleItemPackageName, rule.PackageName)
        if err != nil {
            return err
        }
    }
    if len(rule.WIFISSID) > 0 {
        err = writeRuleItemString(writer, ruleItemWIFISSID, rule.WIFISSID)
        if err != nil {
            return err
        }
    }
    if len(rule.WIFIBSSID) > 0 {
        err = writeRuleItemString(writer, ruleItemWIFIBSSID, rule.WIFIBSSID)
        if err != nil {
            return err
        }
    }
    err = binary.Write(writer, binary.BigEndian, ruleItemFinal)
    if err != nil {
        return err
    }
    err = binary.Write(writer, binary.BigEndian, rule.Invert)
    if err != nil {
        return err
    }
    return nil
}

func readRuleItemString(reader io.Reader) ([]string, error) {
    length, err := rw.ReadUVariant(reader)
    if err != nil {
        return nil, err
    }
    value := make([]string, length)
    for i := uint64(0); i < length; i++ {
        value[i], err = rw.ReadVString(reader)
        if err != nil {
            return nil, err
        }
    }
    return value, nil
}

func writeRuleItemString(writer io.Writer, itemType uint8, value []string) error {
    err := binary.Write(writer, binary.BigEndian, itemType)
    if err != nil {
        return err
    }
    err = rw.WriteUVariant(writer, uint64(len(value)))
    if err != nil {
        return err
    }
    for _, item := range value {
        err = rw.WriteVString(writer, item)
        if err != nil {
            return err
        }
    }
    return nil
}

func readRuleItemUint16(reader io.Reader) ([]uint16, error) {
    length, err := rw.ReadUVariant(reader)
    if err != nil {
        return nil, err
    }
    value := make([]uint16, length)
    for i := uint64(0); i < length; i++ {
        err = binary.Read(reader, binary.BigEndian, &value[i])
        if err != nil {
            return nil, err
        }
    }
    return value, nil
}

func writeRuleItemUint16(writer io.Writer, itemType uint8, value []uint16) error {
    err := binary.Write(writer, binary.BigEndian, itemType)
    if err != nil {
        return err
    }
    err = rw.WriteUVariant(writer, uint64(len(value)))
    if err != nil {
        return err
    }
    for _, item := range value {
        err = binary.Write(writer, binary.BigEndian, item)
        if err != nil {
            return err
        }
    }
    return nil
}

func writeRuleItemCIDR(writer io.Writer, itemType uint8, value []string) error {
    var builder netipx.IPSetBuilder
    for i, prefixString := range value {
        prefix, err := netip.ParsePrefix(prefixString)
        if err == nil {
            builder.AddPrefix(prefix)
            continue
        }
        addr, addrErr := netip.ParseAddr(prefixString)
        if addrErr == nil {
            builder.Add(addr)
            continue
        }
        return E.Cause(err, "parse [", i, "]")
    }
    ipSet, err := builder.IPSet()
    if err != nil {
        return err
    }
    err = binary.Write(writer, binary.BigEndian, itemType)
    if err != nil {
        return err
    }
    return writeIPSet(writer, ipSet)
}

func readLogicalRule(reader io.Reader, recovery bool) (logicalRule option.LogicalHeadlessRule, err error) {
    var mode uint8
    err = binary.Read(reader, binary.BigEndian, &mode)
    if err != nil {
        return
    }
    switch mode {
    case 0:
        logicalRule.Mode = C.LogicalTypeAnd
    case 1:
        logicalRule.Mode = C.LogicalTypeOr
    default:
        err = E.New("unknown logical mode: ", mode)
        return
    }
    length, err := rw.ReadUVariant(reader)
    if err != nil {
        return
    }
    logicalRule.Rules = make([]option.HeadlessRule, length)
    for i := uint64(0); i < length; i++ {
        logicalRule.Rules[i], err = readRule(reader, recovery)
        if err != nil {
            err = E.Cause(err, "read logical rule [", i, "]")
            return
        }
    }
    err = binary.Read(reader, binary.BigEndian, &logicalRule.Invert)
    if err != nil {
        return
    }
    return
}

func writeLogicalRule(writer io.Writer, logicalRule option.LogicalHeadlessRule) error {
    err := binary.Write(writer, binary.BigEndian, uint8(1))
    if err != nil {
        return err
    }
    switch logicalRule.Mode {
    case C.LogicalTypeAnd:
        err = binary.Write(writer, binary.BigEndian, uint8(0))
    case C.LogicalTypeOr:
        err = binary.Write(writer, binary.BigEndian, uint8(1))
    default:
        panic("unknown logical mode: " + logicalRule.Mode)
    }
    if err != nil {
        return err
    }
    err = rw.WriteUVariant(writer, uint64(len(logicalRule.Rules)))
    if err != nil {
        return err
    }
    for _, rule := range logicalRule.Rules {
        err = writeRule(writer, rule)
        if err != nil {
            return err
        }
    }
    err = binary.Write(writer, binary.BigEndian, logicalRule.Invert)
    if err != nil {
        return err
    }
    return nil
}
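A reading-side sketch, not part of this commit, that loads a compiled .srs file back with srs.Read as defined above. Passing true for the recovery argument asks readDefaultRule to re-populate the textual IPCIDR fields from the decoded IP sets. The file name example.srs is invented.

package main

import (
    "fmt"
    "os"

    "github.com/sagernet/sing-box/common/srs"
)

func main() {
    file, err := os.Open("example.srs")
    if err != nil {
        panic(err)
    }
    defer file.Close()
    // Read validates the SRS magic bytes and version, then decodes the
    // zlib-compressed rule list into an option.PlainRuleSet.
    ruleSet, err := srs.Read(file, true)
    if err != nil {
        panic(err)
    }
    fmt.Println("rules:", len(ruleSet.Rules))
}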
116
common/srs/ip_set.go
Normal file
116
common/srs/ip_set.go
Normal file
|
@ -0,0 +1,116 @@
|
||||||
|
package srs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"io"
|
||||||
|
"net/netip"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/sagernet/sing/common/rw"
|
||||||
|
|
||||||
|
"go4.org/netipx"
|
||||||
|
)
|
||||||
|
|
||||||
|
type myIPSet struct {
|
||||||
|
rr []myIPRange
|
||||||
|
}
|
||||||
|
|
||||||
|
type myIPRange struct {
|
||||||
|
from netip.Addr
|
||||||
|
to netip.Addr
|
||||||
|
}
|
||||||
|
|
||||||
|
func readIPSet(reader io.Reader) (*netipx.IPSet, error) {
|
||||||
|
var version uint8
|
||||||
|
err := binary.Read(reader, binary.BigEndian, &version)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var length uint64
|
||||||
|
err = binary.Read(reader, binary.BigEndian, &length)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
mySet := &myIPSet{
|
||||||
|
rr: make([]myIPRange, length),
|
||||||
|
}
|
||||||
|
for i := uint64(0); i < length; i++ {
|
||||||
|
var (
|
||||||
|
fromLen uint64
|
||||||
|
toLen uint64
|
||||||
|
fromAddr netip.Addr
|
||||||
|
toAddr netip.Addr
|
||||||
|
)
|
||||||
|
fromLen, err = rw.ReadUVariant(reader)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
fromBytes := make([]byte, fromLen)
|
||||||
|
_, err = io.ReadFull(reader, fromBytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = fromAddr.UnmarshalBinary(fromBytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
toLen, err = rw.ReadUVariant(reader)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
toBytes := make([]byte, toLen)
|
||||||
|
_, err = io.ReadFull(reader, toBytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = toAddr.UnmarshalBinary(toBytes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
mySet.rr[i] = myIPRange{fromAddr, toAddr}
|
||||||
|
}
|
||||||
|
return (*netipx.IPSet)(unsafe.Pointer(mySet)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeIPSet(writer io.Writer, set *netipx.IPSet) error {
|
||||||
|
err := binary.Write(writer, binary.BigEndian, uint8(1))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
mySet := (*myIPSet)(unsafe.Pointer(set))
|
||||||
|
err = binary.Write(writer, binary.BigEndian, uint64(len(mySet.rr)))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, rr := range mySet.rr {
|
||||||
|
var (
|
||||||
|
fromBinary []byte
|
||||||
|
toBinary []byte
|
||||||
|
)
|
||||||
|
fromBinary, err = rr.from.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = rw.WriteUVariant(writer, uint64(len(fromBinary)))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = writer.Write(fromBinary)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
toBinary, err = rr.to.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = rw.WriteUVariant(writer, uint64(len(toBinary)))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = writer.Write(toBinary)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
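The IP set codec above round-trips netipx ranges through an unsafe layout cast, so a quick round-trip check documents the expected behaviour. A minimal test-style sketch, assuming it sits next to the unexported readIPSet/writeIPSet in the same package; the test name and the sample prefixes are illustrative, not part of the commit.

package srs

import (
	"bytes"
	"net/netip"
	"testing"

	"go4.org/netipx"
)

// Round-trip check for the IP set codec above; the prefixes are arbitrary samples.
func TestIPSetRoundTrip(t *testing.T) {
	var builder netipx.IPSetBuilder
	builder.AddPrefix(netip.MustParsePrefix("10.0.0.0/8"))
	builder.AddPrefix(netip.MustParsePrefix("2001:db8::/32"))
	set, err := builder.IPSet()
	if err != nil {
		t.Fatal(err)
	}
	var buffer bytes.Buffer
	if err := writeIPSet(&buffer, set); err != nil {
		t.Fatal(err)
	}
	decoded, err := readIPSet(&buffer)
	if err != nil {
		t.Fatal(err)
	}
	if !decoded.ContainsPrefix(netip.MustParsePrefix("10.0.0.0/8")) {
		t.Fatal("decoded set lost 10.0.0.0/8")
	}
}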
@@ -9,3 +9,11 @@ const (
 	LogicalTypeAnd = "and"
 	LogicalTypeOr  = "or"
 )
+
+const (
+	RuleSetTypeLocal    = "local"
+	RuleSetTypeRemote   = "remote"
+	RuleSetVersion1     = 1
+	RuleSetFormatSource = "source"
+	RuleSetFormatBinary = "binary"
+)
@@ -22,11 +22,13 @@ var (
 	bucketSelected = []byte("selected")
 	bucketExpand   = []byte("group_expand")
 	bucketMode     = []byte("clash_mode")
+	bucketRuleSet  = []byte("rule_set")
 
 	bucketNameList = []string{
 		string(bucketSelected),
 		string(bucketExpand),
 		string(bucketMode),
+		string(bucketRuleSet),
 	}
 
 	cacheIDDefault = []byte("default")
@@ -257,3 +259,36 @@ func (c *CacheFile) StoreGroupExpand(group string, isExpand bool) error {
 		}
 	})
 }
+
+func (c *CacheFile) LoadRuleSet(tag string) *adapter.SavedRuleSet {
+	var savedSet adapter.SavedRuleSet
+	err := c.DB.View(func(t *bbolt.Tx) error {
+		bucket := c.bucket(t, bucketRuleSet)
+		if bucket == nil {
+			return os.ErrNotExist
+		}
+		setBinary := bucket.Get([]byte(tag))
+		if len(setBinary) == 0 {
+			return os.ErrInvalid
+		}
+		return savedSet.UnmarshalBinary(setBinary)
+	})
+	if err != nil {
+		return nil
+	}
+	return &savedSet
+}
+
+func (c *CacheFile) SaveRuleSet(tag string, set *adapter.SavedRuleSet) error {
+	return c.DB.Batch(func(t *bbolt.Tx) error {
+		bucket, err := c.createBucket(t, bucketRuleSet)
+		if err != nil {
+			return err
+		}
+		setBinary, err := set.MarshalBinary()
+		if err != nil {
+			return err
+		}
+		return bucket.Put([]byte(tag), setBinary)
+	})
+}
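For context, a hedged sketch of how a rule-set updater might use the two new cache-file methods; the helper names and the surrounding package are hypothetical, only CacheFile.LoadRuleSet/SaveRuleSet and adapter.SavedRuleSet come from this commit.

package example

import (
	"time"

	"github.com/sagernet/sing-box/adapter"
)

// Persist a freshly downloaded rule set so it survives a restart.
func cacheDownloadedRuleSet(cacheFile adapter.CacheFile, tag string, content []byte, etag string) error {
	return cacheFile.SaveRuleSet(tag, &adapter.SavedRuleSet{
		Content:     content,
		LastUpdated: time.Now(),
		LastEtag:    etag,
	})
}

// Restore the cached copy on the next start; LoadRuleSet returns nil when the
// bucket or the tag is missing, which callers treat as "no cached copy".
func restoreRuleSet(cacheFile adapter.CacheFile, tag string) ([]byte, bool) {
	saved := cacheFile.LoadRuleSet(tag)
	if saved == nil {
		return nil, false
	}
	return saved.Content, true
}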
@@ -25,7 +25,7 @@ func (c *CacheFile) FakeIPMetadata() *adapter.FakeIPMetadata {
 	err := c.DB.Batch(func(tx *bbolt.Tx) error {
 		bucket := tx.Bucket(bucketFakeIP)
 		if bucket == nil {
-			return nil
+			return os.ErrNotExist
 		}
 		metadataBinary := bucket.Get(keyMetadata)
 		if len(metadataBinary) == 0 {
@@ -100,8 +100,10 @@ func getProxies(server *Server, router adapter.Router) func(w http.ResponseWrite
 			allProxies = append(allProxies, detour.Tag())
 		}
 
-		defaultTag := router.DefaultOutbound(N.NetworkTCP).Tag()
-		if defaultTag == "" {
+		var defaultTag string
+		if defaultOutbound, err := router.DefaultOutbound(N.NetworkTCP); err == nil {
+			defaultTag = defaultOutbound.Tag()
+		} else {
 			defaultTag = allProxies[0]
 		}
 
@@ -51,7 +51,11 @@ func (s *Server) downloadExternalUI() error {
 		}
 		detour = outbound
 	} else {
-		detour = s.router.DefaultOutbound(N.NetworkTCP)
+		outbound, err := s.router.DefaultOutbound(N.NetworkTCP)
+		if err != nil {
+			return err
+		}
+		detour = outbound
 	}
 	httpClient := &http.Client{
 		Transport: &http.Transport{
@@ -94,7 +94,9 @@ func NewTCPTracker(conn net.Conn, manager *Manager, metadata Metadata, router ad
 	var chain []string
 	var next string
 	if rule == nil {
-		next = router.DefaultOutbound(N.NetworkTCP).Tag()
+		if defaultOutbound, err := router.DefaultOutbound(N.NetworkTCP); err == nil {
+			next = defaultOutbound.Tag()
+		}
 	} else {
 		next = rule.Outbound()
 	}
@@ -181,7 +183,9 @@ func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata Metadata, route
 	var chain []string
 	var next string
 	if rule == nil {
-		next = router.DefaultOutbound(N.NetworkUDP).Tag()
+		if defaultOutbound, err := router.DefaultOutbound(N.NetworkUDP); err == nil {
+			next = defaultOutbound.Tag()
+		}
 	} else {
 		next = rule.Outbound()
 	}
@@ -23,16 +23,16 @@ type ClashAPIOptions struct {
 	DefaultMode string   `json:"default_mode,omitempty"`
 	ModeList    []string `json:"-"`
 
+	// Deprecated: migrated to global cache file
+	CacheFile string `json:"cache_file,omitempty"`
+	// Deprecated: migrated to global cache file
+	CacheID string `json:"cache_id,omitempty"`
 	// Deprecated: migrated to global cache file
 	StoreMode bool `json:"store_mode,omitempty"`
 	// Deprecated: migrated to global cache file
 	StoreSelected bool `json:"store_selected,omitempty"`
 	// Deprecated: migrated to global cache file
 	StoreFakeIP bool `json:"store_fakeip,omitempty"`
-	// Deprecated: migrated to global cache file
-	CacheFile string `json:"cache_file,omitempty"`
-	// Deprecated: migrated to global cache file
-	CacheID string `json:"cache_id,omitempty"`
 }
 
 type V2RayAPIOptions struct {
@@ -4,6 +4,7 @@ type RouteOptions struct {
 	GeoIP               *GeoIPOptions   `json:"geoip,omitempty"`
 	Geosite             *GeositeOptions `json:"geosite,omitempty"`
 	Rules               []Rule          `json:"rules,omitempty"`
+	RuleSet             []RuleSet       `json:"rule_set,omitempty"`
 	Final               string          `json:"final,omitempty"`
 	FindProcess         bool            `json:"find_process,omitempty"`
 	AutoDetectInterface bool            `json:"auto_detect_interface,omitempty"`
@@ -91,6 +91,8 @@ type DefaultRule struct {
 	ClashMode                string           `json:"clash_mode,omitempty"`
 	WIFISSID                 Listable[string] `json:"wifi_ssid,omitempty"`
 	WIFIBSSID                Listable[string] `json:"wifi_bssid,omitempty"`
+	RuleSet                  Listable[string] `json:"rule_set,omitempty"`
+	RuleSetIPCIDRMatchSource bool             `json:"rule_set_ipcidr_match_source,omitempty"`
 	Invert                   bool             `json:"invert,omitempty"`
 	Outbound                 string           `json:"outbound,omitempty"`
 }
@@ -91,6 +91,7 @@ type DefaultDNSRule struct {
 	ClashMode    string           `json:"clash_mode,omitempty"`
 	WIFISSID     Listable[string] `json:"wifi_ssid,omitempty"`
 	WIFIBSSID    Listable[string] `json:"wifi_bssid,omitempty"`
+	RuleSet      Listable[string] `json:"rule_set,omitempty"`
 	Invert       bool             `json:"invert,omitempty"`
 	Server       string           `json:"server,omitempty"`
 	DisableCache bool             `json:"disable_cache,omitempty"`
230  option/rule_set.go (new file)
@@ -0,0 +1,230 @@
package option

import (
	"reflect"

	"github.com/sagernet/sing-box/common/json"
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing/common"
	"github.com/sagernet/sing/common/domain"
	E "github.com/sagernet/sing/common/exceptions"
	F "github.com/sagernet/sing/common/format"

	"go4.org/netipx"
)

type _RuleSet struct {
	Type          string        `json:"type"`
	Tag           string        `json:"tag"`
	Format        string        `json:"format"`
	LocalOptions  LocalRuleSet  `json:"-"`
	RemoteOptions RemoteRuleSet `json:"-"`
}

type RuleSet _RuleSet

func (r RuleSet) MarshalJSON() ([]byte, error) {
	var v any
	switch r.Type {
	case C.RuleSetTypeLocal:
		v = r.LocalOptions
	case C.RuleSetTypeRemote:
		v = r.RemoteOptions
	default:
		return nil, E.New("unknown rule set type: " + r.Type)
	}
	return MarshallObjects((_RuleSet)(r), v)
}

func (r *RuleSet) UnmarshalJSON(bytes []byte) error {
	err := json.Unmarshal(bytes, (*_RuleSet)(r))
	if err != nil {
		return err
	}
	if r.Tag == "" {
		return E.New("missing tag")
	}
	switch r.Format {
	case "":
		return E.New("missing format")
	case C.RuleSetFormatSource, C.RuleSetFormatBinary:
	default:
		return E.New("unknown rule set format: " + r.Format)
	}
	var v any
	switch r.Type {
	case C.RuleSetTypeLocal:
		v = &r.LocalOptions
	case C.RuleSetTypeRemote:
		v = &r.RemoteOptions
	case "":
		return E.New("missing type")
	default:
		return E.New("unknown rule set type: " + r.Type)
	}
	err = UnmarshallExcluded(bytes, (*_RuleSet)(r), v)
	if err != nil {
		return E.Cause(err, "rule set")
	}
	return nil
}

type LocalRuleSet struct {
	Path string `json:"path,omitempty"`
}

type RemoteRuleSet struct {
	URL            string   `json:"url"`
	DownloadDetour string   `json:"download_detour,omitempty"`
	UpdateInterval Duration `json:"update_interval,omitempty"`
}

type _HeadlessRule struct {
	Type           string              `json:"type,omitempty"`
	DefaultOptions DefaultHeadlessRule `json:"-"`
	LogicalOptions LogicalHeadlessRule `json:"-"`
}

type HeadlessRule _HeadlessRule

func (r HeadlessRule) MarshalJSON() ([]byte, error) {
	var v any
	switch r.Type {
	case C.RuleTypeDefault:
		r.Type = ""
		v = r.DefaultOptions
	case C.RuleTypeLogical:
		v = r.LogicalOptions
	default:
		return nil, E.New("unknown rule type: " + r.Type)
	}
	return MarshallObjects((_HeadlessRule)(r), v)
}

func (r *HeadlessRule) UnmarshalJSON(bytes []byte) error {
	err := json.Unmarshal(bytes, (*_HeadlessRule)(r))
	if err != nil {
		return err
	}
	var v any
	switch r.Type {
	case "", C.RuleTypeDefault:
		r.Type = C.RuleTypeDefault
		v = &r.DefaultOptions
	case C.RuleTypeLogical:
		v = &r.LogicalOptions
	default:
		return E.New("unknown rule type: " + r.Type)
	}
	err = UnmarshallExcluded(bytes, (*_HeadlessRule)(r), v)
	if err != nil {
		return E.Cause(err, "route rule-set rule")
	}
	return nil
}

func (r HeadlessRule) IsValid() bool {
	switch r.Type {
	case C.RuleTypeDefault, "":
		return r.DefaultOptions.IsValid()
	case C.RuleTypeLogical:
		return r.LogicalOptions.IsValid()
	default:
		panic("unknown rule type: " + r.Type)
	}
}

type DefaultHeadlessRule struct {
	QueryType       Listable[DNSQueryType] `json:"query_type,omitempty"`
	Network         Listable[string]       `json:"network,omitempty"`
	Domain          Listable[string]       `json:"domain,omitempty"`
	DomainSuffix    Listable[string]       `json:"domain_suffix,omitempty"`
	DomainKeyword   Listable[string]       `json:"domain_keyword,omitempty"`
	DomainRegex     Listable[string]       `json:"domain_regex,omitempty"`
	SourceIPCIDR    Listable[string]       `json:"source_ip_cidr,omitempty"`
	IPCIDR          Listable[string]       `json:"ip_cidr,omitempty"`
	SourcePort      Listable[uint16]       `json:"source_port,omitempty"`
	SourcePortRange Listable[string]       `json:"source_port_range,omitempty"`
	Port            Listable[uint16]       `json:"port,omitempty"`
	PortRange       Listable[string]       `json:"port_range,omitempty"`
	ProcessName     Listable[string]       `json:"process_name,omitempty"`
	ProcessPath     Listable[string]       `json:"process_path,omitempty"`
	PackageName     Listable[string]       `json:"package_name,omitempty"`
	WIFISSID        Listable[string]       `json:"wifi_ssid,omitempty"`
	WIFIBSSID       Listable[string]       `json:"wifi_bssid,omitempty"`
	Invert          bool                   `json:"invert,omitempty"`

	DomainMatcher *domain.Matcher `json:"-"`
	SourceIPSet   *netipx.IPSet   `json:"-"`
	IPSet         *netipx.IPSet   `json:"-"`
}

func (r DefaultHeadlessRule) IsValid() bool {
	var defaultValue DefaultHeadlessRule
	defaultValue.Invert = r.Invert
	return !reflect.DeepEqual(r, defaultValue)
}

type LogicalHeadlessRule struct {
	Mode   string         `json:"mode"`
	Rules  []HeadlessRule `json:"rules,omitempty"`
	Invert bool           `json:"invert,omitempty"`
}

func (r LogicalHeadlessRule) IsValid() bool {
	return len(r.Rules) > 0 && common.All(r.Rules, HeadlessRule.IsValid)
}

type _PlainRuleSetCompat struct {
	Version int          `json:"version"`
	Options PlainRuleSet `json:"-"`
}

type PlainRuleSetCompat _PlainRuleSetCompat

func (r PlainRuleSetCompat) MarshalJSON() ([]byte, error) {
	var v any
	switch r.Version {
	case C.RuleSetVersion1:
		v = r.Options
	default:
		return nil, E.New("unknown rule set version: ", r.Version)
	}
	return MarshallObjects((_PlainRuleSetCompat)(r), v)
}

func (r *PlainRuleSetCompat) UnmarshalJSON(bytes []byte) error {
	err := json.Unmarshal(bytes, (*_PlainRuleSetCompat)(r))
	if err != nil {
		return err
	}
	var v any
	switch r.Version {
	case C.RuleSetVersion1:
		v = &r.Options
	case 0:
		return E.New("missing rule set version")
	default:
		return E.New("unknown rule set version: ", r.Version)
	}
	err = UnmarshallExcluded(bytes, (*_PlainRuleSetCompat)(r), v)
	if err != nil {
		return E.Cause(err, "rule set")
	}
	return nil
}

func (r PlainRuleSetCompat) Upgrade() PlainRuleSet {
	var result PlainRuleSet
	switch r.Version {
	case C.RuleSetVersion1:
		result = r.Options
	default:
		panic("unknown rule set version: " + F.ToString(r.Version))
	}
	return result
}

type PlainRuleSet struct {
	Rules []HeadlessRule `json:"rules,omitempty"`
}
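As a usage sketch, the same options can be built programmatically; the tags, path, URL and detour below are placeholders rather than values taken from the commit, and the sketch assumes Duration wraps time.Duration.

package example

import (
	"time"

	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/option"
)

// One local binary rule set and one remote source-format rule set.
func exampleRuleSets() []option.RuleSet {
	return []option.RuleSet{
		{
			Type:         C.RuleSetTypeLocal,
			Tag:          "geoip-private",
			Format:       C.RuleSetFormatBinary,
			LocalOptions: option.LocalRuleSet{Path: "geoip-private.srs"},
		},
		{
			Type:   C.RuleSetTypeRemote,
			Tag:    "geosite-example",
			Format: C.RuleSetFormatSource,
			RemoteOptions: option.RemoteRuleSet{
				URL:            "https://example.org/geosite-example.json",
				DownloadDetour: "direct",
				UpdateInterval: option.Duration(24 * time.Hour),
			},
		},
	}
}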
226  option/time_unit.go (new file)
@@ -0,0 +1,226 @@
package option

import (
	"errors"
	"time"
)

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

const durationDay = 24 * time.Hour

var unitMap = map[string]uint64{
	"ns": uint64(time.Nanosecond),
	"us": uint64(time.Microsecond),
	"µs": uint64(time.Microsecond), // U+00B5 = micro symbol
	"μs": uint64(time.Microsecond), // U+03BC = Greek letter mu
	"ms": uint64(time.Millisecond),
	"s":  uint64(time.Second),
	"m":  uint64(time.Minute),
	"h":  uint64(time.Hour),
	"d":  uint64(durationDay),
}

// ParseDuration parses a duration string.
// A duration string is a possibly signed sequence of
// decimal numbers, each with optional fraction and a unit suffix,
// such as "300ms", "-1.5h" or "2h45m".
// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
func ParseDuration(s string) (Duration, error) {
	// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
	orig := s
	var d uint64
	neg := false

	// Consume [-+]?
	if s != "" {
		c := s[0]
		if c == '-' || c == '+' {
			neg = c == '-'
			s = s[1:]
		}
	}
	// Special case: if all that is left is "0", this is zero.
	if s == "0" {
		return 0, nil
	}
	if s == "" {
		return 0, errors.New("time: invalid duration " + quote(orig))
	}
	for s != "" {
		var (
			v, f  uint64      // integers before, after decimal point
			scale float64 = 1 // value = v + f/scale
		)

		var err error

		// The next character must be [0-9.]
		if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') {
			return 0, errors.New("time: invalid duration " + quote(orig))
		}
		// Consume [0-9]*
		pl := len(s)
		v, s, err = leadingInt(s)
		if err != nil {
			return 0, errors.New("time: invalid duration " + quote(orig))
		}
		pre := pl != len(s) // whether we consumed anything before a period

		// Consume (\.[0-9]*)?
		post := false
		if s != "" && s[0] == '.' {
			s = s[1:]
			pl := len(s)
			f, scale, s = leadingFraction(s)
			post = pl != len(s)
		}
		if !pre && !post {
			// no digits (e.g. ".s" or "-.s")
			return 0, errors.New("time: invalid duration " + quote(orig))
		}

		// Consume unit.
		i := 0
		for ; i < len(s); i++ {
			c := s[i]
			if c == '.' || '0' <= c && c <= '9' {
				break
			}
		}
		if i == 0 {
			return 0, errors.New("time: missing unit in duration " + quote(orig))
		}
		u := s[:i]
		s = s[i:]
		unit, ok := unitMap[u]
		if !ok {
			return 0, errors.New("time: unknown unit " + quote(u) + " in duration " + quote(orig))
		}
		if v > 1<<63/unit {
			// overflow
			return 0, errors.New("time: invalid duration " + quote(orig))
		}
		v *= unit
		if f > 0 {
			// float64 is needed to be nanosecond accurate for fractions of hours.
			// v >= 0 && (f*unit/scale) <= 3.6e+12 (ns/h, h is the largest unit)
			v += uint64(float64(f) * (float64(unit) / scale))
			if v > 1<<63 {
				// overflow
				return 0, errors.New("time: invalid duration " + quote(orig))
			}
		}
		d += v
		if d > 1<<63 {
			return 0, errors.New("time: invalid duration " + quote(orig))
		}
	}
	if neg {
		return -Duration(d), nil
	}
	if d > 1<<63-1 {
		return 0, errors.New("time: invalid duration " + quote(orig))
	}
	return Duration(d), nil
}

var errLeadingInt = errors.New("time: bad [0-9]*") // never printed

// leadingInt consumes the leading [0-9]* from s.
func leadingInt[bytes []byte | string](s bytes) (x uint64, rem bytes, err error) {
	i := 0
	for ; i < len(s); i++ {
		c := s[i]
		if c < '0' || c > '9' {
			break
		}
		if x > 1<<63/10 {
			// overflow
			return 0, rem, errLeadingInt
		}
		x = x*10 + uint64(c) - '0'
		if x > 1<<63 {
			// overflow
			return 0, rem, errLeadingInt
		}
	}
	return x, s[i:], nil
}

// leadingFraction consumes the leading [0-9]* from s.
// It is used only for fractions, so does not return an error on overflow,
// it just stops accumulating precision.
func leadingFraction(s string) (x uint64, scale float64, rem string) {
	i := 0
	scale = 1
	overflow := false
	for ; i < len(s); i++ {
		c := s[i]
		if c < '0' || c > '9' {
			break
		}
		if overflow {
			continue
		}
		if x > (1<<63-1)/10 {
			// It's possible for overflow to give a positive number, so take care.
			overflow = true
			continue
		}
		y := x*10 + uint64(c) - '0'
		if y > 1<<63 {
			overflow = true
			continue
		}
		x = y
		scale *= 10
	}
	return x, scale, s[i:]
}

// These are borrowed from unicode/utf8 and strconv and replicate behavior in
// that package, since we can't take a dependency on either.
const (
	lowerhex  = "0123456789abcdef"
	runeSelf  = 0x80
	runeError = '\uFFFD'
)

func quote(s string) string {
	buf := make([]byte, 1, len(s)+2) // slice will be at least len(s) + quotes
	buf[0] = '"'
	for i, c := range s {
		if c >= runeSelf || c < ' ' {
			// This means you are asking us to parse a time.Duration or
			// time.Location with unprintable or non-ASCII characters in it.
			// We don't expect to hit this case very often. We could try to
			// reproduce strconv.Quote's behavior with full fidelity but
			// given how rarely we expect to hit these edge cases, speed and
			// conciseness are better.
			var width int
			if c == runeError {
				width = 1
				if i+2 < len(s) && s[i:i+3] == string(runeError) {
					width = 3
				}
			} else {
				width = len(string(c))
			}
			for j := 0; j < width; j++ {
				buf = append(buf, `\x`...)
				buf = append(buf, lowerhex[s[i+j]>>4])
				buf = append(buf, lowerhex[s[i+j]&0xF])
			}
		} else {
			if c == '"' || c == '\\' {
				buf = append(buf, '\\')
			}
			buf = append(buf, string(c)...)
		}
	}
	buf = append(buf, '"')
	return string(buf)
}
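A small usage sketch of the vendored parser: it accepts the extra "d" unit that the standard library rejects, which is why Duration.UnmarshalJSON switches to it in the next hunk. The values are illustrative and the sketch assumes Duration converts to time.Duration.

package example

import (
	"fmt"
	"time"

	"github.com/sagernet/sing-box/option"
)

// "1d12h" parses because the unit map above adds "d" (24h); the stdlib parser
// would reject it.
func exampleUpdateInterval() {
	interval, err := option.ParseDuration("1d12h")
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Duration(interval)) // 36h0m0s
}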
@@ -164,7 +164,7 @@ func (d *Duration) UnmarshalJSON(bytes []byte) error {
 	if err != nil {
 		return err
 	}
-	duration, err := time.ParseDuration(value)
+	duration, err := ParseDuration(value)
 	if err != nil {
 		return err
 	}
@@ -174,6 +174,14 @@ func (d *Duration) UnmarshalJSON(bytes []byte) error {
 
 type DNSQueryType uint16
 
+func (t DNSQueryType) String() string {
+	typeName, loaded := mDNS.TypeToString[uint16(t)]
+	if loaded {
+		return typeName
+	}
+	return F.ToString(uint16(t))
+}
+
 func (t DNSQueryType) MarshalJSON() ([]byte, error) {
 	typeName, loaded := mDNS.TypeToString[uint16(t)]
 	if loaded {
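A brief sketch of the new String method in use; the numeric codes are illustrative (28 is AAAA, 999 is an unassigned type and falls back to the numeric form).

package example

import (
	"fmt"

	"github.com/sagernet/sing-box/option"
)

func printQueryTypes() {
	fmt.Println(option.DNSQueryType(28).String())  // "AAAA", resolved via mDNS.TypeToString
	fmt.Println(option.DNSQueryType(999).String()) // "999", unknown type falls back to the number
}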
170  route/router.go
@@ -39,6 +39,7 @@ import (
 	M "github.com/sagernet/sing/common/metadata"
 	N "github.com/sagernet/sing/common/network"
 	serviceNTP "github.com/sagernet/sing/common/ntp"
+	"github.com/sagernet/sing/common/task"
 	"github.com/sagernet/sing/common/uot"
 	"github.com/sagernet/sing/service"
 	"github.com/sagernet/sing/service/pause"
@@ -64,9 +65,12 @@ type Router struct {
 	geoIPReader           *geoip.Reader
 	geositeReader         *geosite.Reader
 	geositeCache          map[string]adapter.Rule
+	needFindProcess       bool
 	dnsClient             *dns.Client
 	defaultDomainStrategy dns.DomainStrategy
 	dnsRules              []adapter.DNSRule
+	ruleSets              []adapter.RuleSet
+	ruleSetMap            map[string]adapter.RuleSet
 	defaultTransport      dns.Transport
 	transports            []dns.Transport
 	transportMap          map[string]dns.Transport
@@ -107,11 +111,13 @@ func NewRouter(
 		outboundByTag:         make(map[string]adapter.Outbound),
 		rules:                 make([]adapter.Rule, 0, len(options.Rules)),
 		dnsRules:              make([]adapter.DNSRule, 0, len(dnsOptions.Rules)),
+		ruleSetMap:            make(map[string]adapter.RuleSet),
 		needGeoIPDatabase:     hasRule(options.Rules, isGeoIPRule) || hasDNSRule(dnsOptions.Rules, isGeoIPDNSRule),
 		needGeositeDatabase:   hasRule(options.Rules, isGeositeRule) || hasDNSRule(dnsOptions.Rules, isGeositeDNSRule),
 		geoIPOptions:          common.PtrValueOrDefault(options.GeoIP),
 		geositeOptions:        common.PtrValueOrDefault(options.Geosite),
 		geositeCache:          make(map[string]adapter.Rule),
+		needFindProcess:       hasRule(options.Rules, isProcessRule) || hasDNSRule(dnsOptions.Rules, isProcessDNSRule) || options.FindProcess,
 		defaultDetour:         options.Final,
 		defaultDomainStrategy: dns.DomainStrategy(dnsOptions.Strategy),
 		autoDetectInterface:   options.AutoDetectInterface,
@@ -141,6 +147,17 @@ func NewRouter(
 		}
 		router.dnsRules = append(router.dnsRules, dnsRule)
 	}
+	for i, ruleSetOptions := range options.RuleSet {
+		if _, exists := router.ruleSetMap[ruleSetOptions.Tag]; exists {
+			return nil, E.New("duplicate rule-set tag: ", ruleSetOptions.Tag)
+		}
+		ruleSet, err := NewRuleSet(ctx, router, router.logger, ruleSetOptions)
+		if err != nil {
+			return nil, E.Cause(err, "parse rule-set[", i, "]")
+		}
+		router.ruleSets = append(router.ruleSets, ruleSet)
+		router.ruleSetMap[ruleSetOptions.Tag] = ruleSet
+	}
 
 	transports := make([]dns.Transport, len(dnsOptions.Servers))
 	dummyTransportMap := make(map[string]dns.Transport)
@@ -296,34 +313,6 @@ func NewRouter(
 		router.interfaceMonitor = interfaceMonitor
 	}
 
-	needFindProcess := hasRule(options.Rules, isProcessRule) || hasDNSRule(dnsOptions.Rules, isProcessDNSRule) || options.FindProcess
-	needPackageManager := C.IsAndroid && platformInterface == nil && (needFindProcess || common.Any(inbounds, func(inbound option.Inbound) bool {
-		return len(inbound.TunOptions.IncludePackage) > 0 || len(inbound.TunOptions.ExcludePackage) > 0
-	}))
-	if needPackageManager {
-		packageManager, err := tun.NewPackageManager(router)
-		if err != nil {
-			return nil, E.Cause(err, "create package manager")
-		}
-		router.packageManager = packageManager
-	}
-	if needFindProcess {
-		if platformInterface != nil {
-			router.processSearcher = platformInterface
-		} else {
-			searcher, err := process.NewSearcher(process.Config{
-				Logger:         logFactory.NewLogger("router/process"),
-				PackageManager: router.packageManager,
-			})
-			if err != nil {
-				if err != os.ErrInvalid {
-					router.logger.Warn(E.Cause(err, "create process searcher"))
-				}
-			} else {
-				router.processSearcher = searcher
-			}
-		}
-	}
 	if ntpOptions.Enabled {
 		timeService, err := ntp.NewService(ctx, router, logFactory.NewLogger("ntp"), ntpOptions)
 		if err != nil {
@@ -332,11 +321,6 @@ func NewRouter(
 		service.ContextWith[serviceNTP.TimeService](ctx, timeService)
 		router.timeService = timeService
 	}
-	if platformInterface != nil && router.interfaceMonitor != nil && router.needWIFIState {
-		router.interfaceMonitor.RegisterCallback(func(_ int) {
-			router.updateWIFIState()
-		})
-	}
 	return router, nil
 }
 
@@ -451,12 +435,6 @@ func (r *Router) Start() error {
 			return err
 		}
 	}
-	if r.packageManager != nil {
-		err := r.packageManager.Start()
-		if err != nil {
-			return err
-		}
-	}
 	if r.needGeositeDatabase {
 		for _, rule := range r.rules {
 			err := rule.UpdateGeosite()
@@ -477,9 +455,89 @@ func (r *Router) Start() error {
 		r.geositeCache = nil
 		r.geositeReader = nil
 	}
-	if r.needWIFIState {
+	if r.fakeIPStore != nil {
+		err := r.fakeIPStore.Start()
+		if err != nil {
+			return err
+		}
+	}
+	if len(r.ruleSets) > 0 {
+		ruleSetStartContext := NewRuleSetStartContext()
+		var ruleSetStartGroup task.Group
+		for i, ruleSet := range r.ruleSets {
+			ruleSetInPlace := ruleSet
+			ruleSetStartGroup.Append0(func(ctx context.Context) error {
+				err := ruleSetInPlace.StartContext(ctx, ruleSetStartContext)
+				if err != nil {
+					return E.Cause(err, "initialize rule-set[", i, "]")
+				}
+				return nil
+			})
+		}
+		ruleSetStartGroup.Concurrency(5)
+		ruleSetStartGroup.FastFail()
+		err := ruleSetStartGroup.Run(r.ctx)
+		if err != nil {
+			return err
+		}
+		ruleSetStartContext.Close()
+	}
+
+	var (
+		needProcessFromRuleSet   bool
+		needWIFIStateFromRuleSet bool
+	)
+	for _, ruleSet := range r.ruleSets {
+		metadata := ruleSet.Metadata()
+		if metadata.ContainsProcessRule {
+			needProcessFromRuleSet = true
+		}
+		if metadata.ContainsWIFIRule {
+			needWIFIStateFromRuleSet = true
+		}
+	}
+	if needProcessFromRuleSet || r.needFindProcess {
+		needPackageManager := C.IsAndroid && r.platformInterface == nil
+
+		if needPackageManager {
+			packageManager, err := tun.NewPackageManager(r)
+			if err != nil {
+				return E.Cause(err, "create package manager")
+			}
+			if packageManager != nil {
+				err = packageManager.Start()
+				if err != nil {
+					return err
+				}
+			}
+			r.packageManager = packageManager
+		}
+
+		if r.platformInterface != nil {
+			r.processSearcher = r.platformInterface
+		} else {
+			searcher, err := process.NewSearcher(process.Config{
+				Logger:         r.logger,
+				PackageManager: r.packageManager,
+			})
+			if err != nil {
+				if err != os.ErrInvalid {
+					r.logger.Warn(E.Cause(err, "create process searcher"))
+				}
+			} else {
+				r.processSearcher = searcher
+			}
+		}
+	}
+	if needWIFIStateFromRuleSet || r.needWIFIState {
+		if r.platformInterface != nil && r.interfaceMonitor != nil {
+			r.interfaceMonitor.RegisterCallback(func(_ int) {
+				r.updateWIFIState()
+			})
+		}
 		r.updateWIFIState()
 	}
+
 	for i, rule := range r.rules {
 		err := rule.Start()
 		if err != nil {
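The concurrent start above follows a small task.Group pattern from the sing library; an isolated sketch of the same idea, where startOne stands in for RuleSet.StartContext and is not part of the commit.

package example

import (
	"context"

	E "github.com/sagernet/sing/common/exceptions"
	"github.com/sagernet/sing/common/task"
)

// Start every rule set on a bounded task group: at most five run at once and
// the first failure cancels the rest, mirroring the Router.Start logic above.
func startAll(ctx context.Context, tags []string, startOne func(context.Context, string) error) error {
	var group task.Group
	for i, tag := range tags {
		i, tag := i, tag // capture per-iteration values for the closure
		group.Append0(func(ctx context.Context) error {
			if err := startOne(ctx, tag); err != nil {
				return E.Cause(err, "initialize rule-set[", i, "]")
			}
			return nil
		})
	}
	group.Concurrency(5)
	group.FastFail()
	return group.Run(ctx)
}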
@@ -492,12 +550,6 @@ func (r *Router) Start() error {
 			return E.Cause(err, "initialize DNS rule[", i, "]")
 		}
 	}
-	if r.fakeIPStore != nil {
-		err := r.fakeIPStore.Start()
-		if err != nil {
-			return err
-		}
-	}
 	for i, transport := range r.transports {
 		err := transport.Start()
 		if err != nil {
@@ -573,6 +625,14 @@ func (r *Router) Close() error {
 }
 
 func (r *Router) PostStart() error {
+	if len(r.ruleSets) > 0 {
+		for i, ruleSet := range r.ruleSets {
+			err := ruleSet.PostStart()
+			if err != nil {
+				return E.Cause(err, "post start rule-set[", i, "]")
+			}
+		}
+	}
 	r.started = true
 	return nil
 }
@@ -582,11 +642,17 @@ func (r *Router) Outbound(tag string) (adapter.Outbound, bool) {
 	return outbound, loaded
 }
 
-func (r *Router) DefaultOutbound(network string) adapter.Outbound {
+func (r *Router) DefaultOutbound(network string) (adapter.Outbound, error) {
 	if network == N.NetworkTCP {
-		return r.defaultOutboundForConnection
+		if r.defaultOutboundForConnection == nil {
+			return nil, E.New("missing default outbound for TCP connections")
+		}
+		return r.defaultOutboundForConnection, nil
 	} else {
-		return r.defaultOutboundForPacketConnection
+		if r.defaultOutboundForPacketConnection == nil {
+			return nil, E.New("missing default outbound for UDP connections")
+		}
+		return r.defaultOutboundForPacketConnection, nil
 	}
 }
 
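Callers of the changed DefaultOutbound signature now have to handle the missing-outbound error explicitly, as the Clash API and connection trackers do above; a hedged sketch of a helper doing the same (the function name is illustrative).

package example

import (
	"github.com/sagernet/sing-box/adapter"
	N "github.com/sagernet/sing/common/network"
)

// Resolve the default TCP outbound tag, treating the new error as "not configured".
func defaultTCPTag(router adapter.Router) string {
	outbound, err := router.DefaultOutbound(N.NetworkTCP)
	if err != nil {
		return ""
	}
	return outbound.Tag()
}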
@@ -594,6 +660,11 @@ func (r *Router) FakeIPStore() adapter.FakeIPStore {
 	return r.fakeIPStore
 }
 
+func (r *Router) RuleSet(tag string) (adapter.RuleSet, bool) {
+	ruleSet, loaded := r.ruleSetMap[tag]
+	return ruleSet, loaded
+}
+
 func (r *Router) RouteConnection(ctx context.Context, conn net.Conn, metadata adapter.InboundContext) error {
 	if metadata.InboundDetour != "" {
 		if metadata.LastInbound == metadata.InboundDetour {
@@ -882,6 +953,7 @@ func (r *Router) match0(ctx context.Context, metadata *adapter.InboundContext, d
 		}
 	}
 	for i, rule := range r.rules {
+		metadata.ResetRuleCache()
 		if rule.Match(metadata) {
 			detour := rule.Outbound()
 			r.logger.DebugContext(ctx, "match[", i, "] ", rule.String(), " => ", detour)
@@ -43,6 +43,7 @@ func (r *Router) matchDNS(ctx context.Context) (context.Context, dns.Transport,
 		panic("no context")
 	}
 	for i, rule := range r.dnsRules {
+		metadata.ResetRuleCache()
 		if rule.Match(metadata) {
 			detour := rule.Outbound()
 			transport, loaded := r.transportMap[detour]
@@ -13,8 +13,6 @@ import (
 	"github.com/sagernet/sing-box/common/geoip"
 	"github.com/sagernet/sing-box/common/geosite"
 	C "github.com/sagernet/sing-box/constant"
-	"github.com/sagernet/sing-box/option"
-	"github.com/sagernet/sing/common"
 	E "github.com/sagernet/sing/common/exceptions"
 	M "github.com/sagernet/sing/common/metadata"
 	"github.com/sagernet/sing/common/rw"
@@ -243,71 +241,3 @@ func (r *Router) downloadGeositeDatabase(savePath string) error {
 	}
 	return err
 }
-
-func hasRule(rules []option.Rule, cond func(rule option.DefaultRule) bool) bool {
-	for _, rule := range rules {
-		switch rule.Type {
-		case C.RuleTypeDefault:
-			if cond(rule.DefaultOptions) {
-				return true
-			}
-		case C.RuleTypeLogical:
-			if hasRule(rule.LogicalOptions.Rules, cond) {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-func hasDNSRule(rules []option.DNSRule, cond func(rule option.DefaultDNSRule) bool) bool {
-	for _, rule := range rules {
-		switch rule.Type {
-		case C.RuleTypeDefault:
-			if cond(rule.DefaultOptions) {
-				return true
-			}
-		case C.RuleTypeLogical:
-			if hasDNSRule(rule.LogicalOptions.Rules, cond) {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-func isGeoIPRule(rule option.DefaultRule) bool {
-	return len(rule.SourceGeoIP) > 0 && common.Any(rule.SourceGeoIP, notPrivateNode) || len(rule.GeoIP) > 0 && common.Any(rule.GeoIP, notPrivateNode)
-}
-
-func isGeoIPDNSRule(rule option.DefaultDNSRule) bool {
-	return len(rule.SourceGeoIP) > 0 && common.Any(rule.SourceGeoIP, notPrivateNode)
-}
-
-func isGeositeRule(rule option.DefaultRule) bool {
-	return len(rule.Geosite) > 0
-}
-
-func isGeositeDNSRule(rule option.DefaultDNSRule) bool {
-	return len(rule.Geosite) > 0
-}
-
-func isProcessRule(rule option.DefaultRule) bool {
-	return len(rule.ProcessName) > 0 || len(rule.ProcessPath) > 0 || len(rule.PackageName) > 0 || len(rule.User) > 0 || len(rule.UserID) > 0
-}
-
-func isProcessDNSRule(rule option.DefaultDNSRule) bool {
-	return len(rule.ProcessName) > 0 || len(rule.ProcessPath) > 0 || len(rule.PackageName) > 0 || len(rule.User) > 0 || len(rule.UserID) > 0
-}
-
-func notPrivateNode(code string) bool {
-	return code != "private"
-}
-
-func isWIFIRule(rule option.DefaultRule) bool {
-	return len(rule.WIFISSID) > 0 || len(rule.WIFIBSSID) > 0
-}
-
-func isWIFIDNSRule(rule option.DefaultDNSRule) bool {
-	return len(rule.WIFISSID) > 0 || len(rule.WIFIBSSID) > 0
-}
99  route/router_rule.go (new file)
@@ -0,0 +1,99 @@
package route

import (
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/option"
	"github.com/sagernet/sing/common"
)

func hasRule(rules []option.Rule, cond func(rule option.DefaultRule) bool) bool {
	for _, rule := range rules {
		switch rule.Type {
		case C.RuleTypeDefault:
			if cond(rule.DefaultOptions) {
				return true
			}
		case C.RuleTypeLogical:
			if hasRule(rule.LogicalOptions.Rules, cond) {
				return true
			}
		}
	}
	return false
}

func hasDNSRule(rules []option.DNSRule, cond func(rule option.DefaultDNSRule) bool) bool {
	for _, rule := range rules {
		switch rule.Type {
		case C.RuleTypeDefault:
			if cond(rule.DefaultOptions) {
				return true
			}
		case C.RuleTypeLogical:
			if hasDNSRule(rule.LogicalOptions.Rules, cond) {
				return true
			}
		}
	}
	return false
}

func hasHeadlessRule(rules []option.HeadlessRule, cond func(rule option.DefaultHeadlessRule) bool) bool {
	for _, rule := range rules {
		switch rule.Type {
		case C.RuleTypeDefault:
			if cond(rule.DefaultOptions) {
				return true
			}
		case C.RuleTypeLogical:
			if hasHeadlessRule(rule.LogicalOptions.Rules, cond) {
				return true
			}
		}
	}
	return false
}

func isGeoIPRule(rule option.DefaultRule) bool {
	return len(rule.SourceGeoIP) > 0 && common.Any(rule.SourceGeoIP, notPrivateNode) || len(rule.GeoIP) > 0 && common.Any(rule.GeoIP, notPrivateNode)
}

func isGeoIPDNSRule(rule option.DefaultDNSRule) bool {
	return len(rule.SourceGeoIP) > 0 && common.Any(rule.SourceGeoIP, notPrivateNode)
}

func isGeositeRule(rule option.DefaultRule) bool {
	return len(rule.Geosite) > 0
}

func isGeositeDNSRule(rule option.DefaultDNSRule) bool {
	return len(rule.Geosite) > 0
}

func isProcessRule(rule option.DefaultRule) bool {
	return len(rule.ProcessName) > 0 || len(rule.ProcessPath) > 0 || len(rule.PackageName) > 0 || len(rule.User) > 0 || len(rule.UserID) > 0
}

func isProcessDNSRule(rule option.DefaultDNSRule) bool {
	return len(rule.ProcessName) > 0 || len(rule.ProcessPath) > 0 || len(rule.PackageName) > 0 || len(rule.User) > 0 || len(rule.UserID) > 0
}

func isProcessHeadlessRule(rule option.DefaultHeadlessRule) bool {
	return len(rule.ProcessName) > 0 || len(rule.ProcessPath) > 0 || len(rule.PackageName) > 0
}

func notPrivateNode(code string) bool {
	return code != "private"
}

func isWIFIRule(rule option.DefaultRule) bool {
	return len(rule.WIFISSID) > 0 || len(rule.WIFIBSSID) > 0
}

func isWIFIDNSRule(rule option.DefaultDNSRule) bool {
	return len(rule.WIFISSID) > 0 || len(rule.WIFIBSSID) > 0
}

func isWIFIHeadlessRule(rule option.DefaultHeadlessRule) bool {
	return len(rule.WIFISSID) > 0 || len(rule.WIFIBSSID) > 0
}
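A sketch of how these headless-rule helpers can feed the rule-set metadata flags consumed in Router.Start; the wiring below is illustrative and not part of the commit.

package route

import (
	"github.com/sagernet/sing-box/option"
)

// Derive rule-set capability flags from a parsed plain rule set; Router.Start
// reads the corresponding metadata fields to decide whether process and WIFI
// lookups are needed.
func sketchRuleSetMetadata(ruleSet option.PlainRuleSet) (containsProcessRule, containsWIFIRule bool) {
	containsProcessRule = hasHeadlessRule(ruleSet.Rules, isProcessHeadlessRule)
	containsWIFIRule = hasHeadlessRule(ruleSet.Rules, isWIFIHeadlessRule)
	return
}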
@@ -1,6 +1,7 @@
 package route
 
 import (
+	"io"
 	"strings"
 
 	"github.com/sagernet/sing-box/adapter"
@@ -16,6 +17,7 @@ type abstractDefaultRule struct {
 	destinationAddressItems []RuleItem
 	destinationPortItems    []RuleItem
 	allItems                []RuleItem
+	ruleSetItem             RuleItem
 	invert                  bool
 	outbound                string
 }
@@ -61,63 +63,63 @@ func (r *abstractDefaultRule) Match(metadata *adapter.InboundContext) bool {
 		return true
 	}
 
+	if len(r.sourceAddressItems) > 0 && !metadata.SourceAddressMatch {
+		for _, item := range r.sourceAddressItems {
+			if item.Match(metadata) {
+				metadata.SourceAddressMatch = true
+				break
+			}
+		}
+	}
+
+	if len(r.sourcePortItems) > 0 && !metadata.SourceAddressMatch {
+		for _, item := range r.sourcePortItems {
+			if item.Match(metadata) {
+				metadata.SourcePortMatch = true
+				break
+			}
+		}
+	}
+
+	if len(r.destinationAddressItems) > 0 && !metadata.SourceAddressMatch {
+		for _, item := range r.destinationAddressItems {
+			if item.Match(metadata) {
+				metadata.DestinationAddressMatch = true
+				break
+			}
+		}
+	}
+
+	if len(r.destinationPortItems) > 0 && !metadata.SourceAddressMatch {
+		for _, item := range r.destinationPortItems {
+			if item.Match(metadata) {
+				metadata.DestinationPortMatch = true
+				break
+			}
+		}
+	}
+
 	for _, item := range r.items {
 		if !item.Match(metadata) {
 			return r.invert
 		}
 	}
 
-	if len(r.sourceAddressItems) > 0 {
-		var sourceAddressMatch bool
-		for _, item := range r.sourceAddressItems {
-			if item.Match(metadata) {
-				sourceAddressMatch = true
-				break
-			}
-		}
-		if !sourceAddressMatch {
+	if len(r.sourceAddressItems) > 0 && !metadata.SourceAddressMatch {
 		return r.invert
 	}
-	}
 
-	if len(r.sourcePortItems) > 0 {
-		var sourcePortMatch bool
-		for _, item := range r.sourcePortItems {
-			if item.Match(metadata) {
-				sourcePortMatch = true
-				break
-			}
-		}
-		if !sourcePortMatch {
+	if len(r.sourcePortItems) > 0 && !metadata.SourcePortMatch {
 		return r.invert
 	}
-	}
 
-	if len(r.destinationAddressItems) > 0 {
-		var destinationAddressMatch bool
-		for _, item := range r.destinationAddressItems {
-			if item.Match(metadata) {
-				destinationAddressMatch = true
-				break
-			}
-		}
-		if !destinationAddressMatch {
+	if len(r.destinationAddressItems) > 0 && !metadata.DestinationAddressMatch {
 		return r.invert
 	}
-	}
 
-	if len(r.destinationPortItems) > 0 {
-		var destinationPortMatch bool
-		for _, item := range r.destinationPortItems {
-			if item.Match(metadata) {
-				destinationPortMatch = true
-				break
-			}
-		}
-		if !destinationPortMatch {
+	if len(r.destinationPortItems) > 0 && !metadata.DestinationPortMatch {
 		return r.invert
 	}
-	}
 
 	return !r.invert
 }
@@ -135,7 +137,7 @@ func (r *abstractDefaultRule) String() string {
 }
 
 type abstractLogicalRule struct {
-	rules    []adapter.Rule
+	rules    []adapter.HeadlessRule
 	mode     string
 	invert   bool
 	outbound string
@@ -146,7 +148,10 @@ func (r *abstractLogicalRule) Type() string {
 }
 
 func (r *abstractLogicalRule) UpdateGeosite() error {
-	for _, rule := range r.rules {
+	for _, rule := range common.FilterIsInstance(r.rules, func(it adapter.HeadlessRule) (adapter.Rule, bool) {
+		rule, loaded := it.(adapter.Rule)
+		return rule, loaded
+	}) {
 		err := rule.UpdateGeosite()
 		if err != nil {
 			return err
@@ -156,7 +161,10 @@ func (r *abstractLogicalRule) UpdateGeosite() error {
 }
 
 func (r *abstractLogicalRule) Start() error {
-	for _, rule := range r.rules {
+	for _, rule := range common.FilterIsInstance(r.rules, func(it adapter.HeadlessRule) (common.Starter, bool) {
+		rule, loaded := it.(common.Starter)
+		return rule, loaded
+	}) {
 		err := rule.Start()
 		if err != nil {
 			return err
@@ -166,7 +174,10 @@ func (r *abstractLogicalRule) Start() error {
 }
 
 func (r *abstractLogicalRule) Close() error {
-	for _, rule := range r.rules {
+	for _, rule := range common.FilterIsInstance(r.rules, func(it adapter.HeadlessRule) (io.Closer, bool) {
+		rule, loaded := it.(io.Closer)
+		return rule, loaded
+	}) {
 		err := rule.Close()
 		if err != nil {
 			return err
@@ -177,11 +188,13 @@ func (r *abstractLogicalRule) Close() error {
 
 func (r *abstractLogicalRule) Match(metadata *adapter.InboundContext) bool {
 	if r.mode == C.LogicalTypeAnd {
-		return common.All(r.rules, func(it adapter.Rule) bool {
+		return common.All(r.rules, func(it adapter.HeadlessRule) bool {
+			metadata.ResetRuleCache()
 			return it.Match(metadata)
 		}) != r.invert
 	} else {
-		return common.Any(r.rules, func(it adapter.Rule) bool {
+		return common.Any(r.rules, func(it adapter.HeadlessRule) bool {
+			metadata.ResetRuleCache()
 			return it.Match(metadata)
 		}) != r.invert
 	}
@@ -194,6 +194,11 @@ func NewDefaultRule(router adapter.Router, logger log.ContextLogger, options opt
 		rule.items = append(rule.items, item)
 		rule.allItems = append(rule.allItems, item)
 	}
+	if len(options.RuleSet) > 0 {
+		item := NewRuleSetItem(router, options.RuleSet, options.RuleSetIPCIDRMatchSource)
+		rule.items = append(rule.items, item)
+		rule.allItems = append(rule.allItems, item)
+	}
 	return rule, nil
 }
 
@@ -206,7 +211,7 @@ type LogicalRule struct {
 func NewLogicalRule(router adapter.Router, logger log.ContextLogger, options option.LogicalRule) (*LogicalRule, error) {
 	r := &LogicalRule{
 		abstractLogicalRule{
-			rules:    make([]adapter.Rule, len(options.Rules)),
+			rules:    make([]adapter.HeadlessRule, len(options.Rules)),
 			invert:   options.Invert,
 			outbound: options.Outbound,
 		},
@@ -190,6 +190,11 @@ func NewDefaultDNSRule(router adapter.Router, logger log.ContextLogger, options
 		rule.items = append(rule.items, item)
 		rule.allItems = append(rule.allItems, item)
 	}
+	if len(options.RuleSet) > 0 {
+		item := NewRuleSetItem(router, options.RuleSet, false)
+		rule.items = append(rule.items, item)
+		rule.allItems = append(rule.allItems, item)
+	}
 	return rule, nil
 }
 
@@ -212,7 +217,7 @@ type LogicalDNSRule struct {
func NewLogicalDNSRule(router adapter.Router, logger log.ContextLogger, options option.LogicalDNSRule) (*LogicalDNSRule, error) {
	r := &LogicalDNSRule{
		abstractLogicalRule: abstractLogicalRule{
-			rules:    make([]adapter.Rule, len(options.Rules)),
+			rules:    make([]adapter.HeadlessRule, len(options.Rules)),
			invert:   options.Invert,
			outbound: options.Server,
		},
173  route/rule_headless.go  Normal file
@@ -0,0 +1,173 @@
package route

import (
	"github.com/sagernet/sing-box/adapter"
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/option"
	E "github.com/sagernet/sing/common/exceptions"
)

func NewHeadlessRule(router adapter.Router, options option.HeadlessRule) (adapter.HeadlessRule, error) {
	switch options.Type {
	case "", C.RuleTypeDefault:
		if !options.DefaultOptions.IsValid() {
			return nil, E.New("missing conditions")
		}
		return NewDefaultHeadlessRule(router, options.DefaultOptions)
	case C.RuleTypeLogical:
		if !options.LogicalOptions.IsValid() {
			return nil, E.New("missing conditions")
		}
		return NewLogicalHeadlessRule(router, options.LogicalOptions)
	default:
		return nil, E.New("unknown rule type: ", options.Type)
	}
}

var _ adapter.HeadlessRule = (*DefaultHeadlessRule)(nil)

type DefaultHeadlessRule struct {
	abstractDefaultRule
}

func NewDefaultHeadlessRule(router adapter.Router, options option.DefaultHeadlessRule) (*DefaultHeadlessRule, error) {
	rule := &DefaultHeadlessRule{
		abstractDefaultRule{
			invert: options.Invert,
		},
	}
	if len(options.Network) > 0 {
		item := NewNetworkItem(options.Network)
		rule.items = append(rule.items, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.Domain) > 0 || len(options.DomainSuffix) > 0 {
		item := NewDomainItem(options.Domain, options.DomainSuffix)
		rule.destinationAddressItems = append(rule.destinationAddressItems, item)
		rule.allItems = append(rule.allItems, item)
	} else if options.DomainMatcher != nil {
		item := NewRawDomainItem(options.DomainMatcher)
		rule.destinationAddressItems = append(rule.destinationAddressItems, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.DomainKeyword) > 0 {
		item := NewDomainKeywordItem(options.DomainKeyword)
		rule.destinationAddressItems = append(rule.destinationAddressItems, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.DomainRegex) > 0 {
		item, err := NewDomainRegexItem(options.DomainRegex)
		if err != nil {
			return nil, E.Cause(err, "domain_regex")
		}
		rule.destinationAddressItems = append(rule.destinationAddressItems, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.SourceIPCIDR) > 0 {
		item, err := NewIPCIDRItem(true, options.SourceIPCIDR)
		if err != nil {
			return nil, E.Cause(err, "source_ipcidr")
		}
		rule.sourceAddressItems = append(rule.sourceAddressItems, item)
		rule.allItems = append(rule.allItems, item)
	} else if options.SourceIPSet != nil {
		item := NewRawIPCIDRItem(true, options.SourceIPSet)
		rule.sourceAddressItems = append(rule.sourceAddressItems, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.IPCIDR) > 0 {
		item, err := NewIPCIDRItem(false, options.IPCIDR)
		if err != nil {
			return nil, E.Cause(err, "ipcidr")
		}
		rule.destinationAddressItems = append(rule.destinationAddressItems, item)
		rule.allItems = append(rule.allItems, item)
	} else if options.IPSet != nil {
		item := NewRawIPCIDRItem(false, options.IPSet)
		rule.destinationAddressItems = append(rule.destinationAddressItems, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.SourcePort) > 0 {
		item := NewPortItem(true, options.SourcePort)
		rule.sourcePortItems = append(rule.sourcePortItems, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.SourcePortRange) > 0 {
		item, err := NewPortRangeItem(true, options.SourcePortRange)
		if err != nil {
			return nil, E.Cause(err, "source_port_range")
		}
		rule.sourcePortItems = append(rule.sourcePortItems, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.Port) > 0 {
		item := NewPortItem(false, options.Port)
		rule.destinationPortItems = append(rule.destinationPortItems, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.PortRange) > 0 {
		item, err := NewPortRangeItem(false, options.PortRange)
		if err != nil {
			return nil, E.Cause(err, "port_range")
		}
		rule.destinationPortItems = append(rule.destinationPortItems, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.ProcessName) > 0 {
		item := NewProcessItem(options.ProcessName)
		rule.items = append(rule.items, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.ProcessPath) > 0 {
		item := NewProcessPathItem(options.ProcessPath)
		rule.items = append(rule.items, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.PackageName) > 0 {
		item := NewPackageNameItem(options.PackageName)
		rule.items = append(rule.items, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.WIFISSID) > 0 {
		item := NewWIFISSIDItem(router, options.WIFISSID)
		rule.items = append(rule.items, item)
		rule.allItems = append(rule.allItems, item)
	}
	if len(options.WIFIBSSID) > 0 {
		item := NewWIFIBSSIDItem(router, options.WIFIBSSID)
		rule.items = append(rule.items, item)
		rule.allItems = append(rule.allItems, item)
	}
	return rule, nil
}

var _ adapter.HeadlessRule = (*LogicalHeadlessRule)(nil)

type LogicalHeadlessRule struct {
	abstractLogicalRule
}

func NewLogicalHeadlessRule(router adapter.Router, options option.LogicalHeadlessRule) (*LogicalHeadlessRule, error) {
	r := &LogicalHeadlessRule{
		abstractLogicalRule{
			rules:  make([]adapter.HeadlessRule, len(options.Rules)),
			invert: options.Invert,
		},
	}
	switch options.Mode {
	case C.LogicalTypeAnd:
		r.mode = C.LogicalTypeAnd
	case C.LogicalTypeOr:
		r.mode = C.LogicalTypeOr
	default:
		return nil, E.New("unknown logical mode: ", options.Mode)
	}
	for i, subRule := range options.Rules {
		rule, err := NewHeadlessRule(router, subRule)
		if err != nil {
			return nil, E.Cause(err, "sub rule[", i, "]")
		}
		r.rules[i] = rule
	}
	return r, nil
}
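The constructor above follows one pattern throughout: every non-empty option group becomes a match item appended to items/allItems, and the rule matches only when all collected items match, with invert applied at the end. A self-contained sketch of that pattern with deliberately simplified, hypothetical types (not the real option or RuleItem types):

package main

import (
	"fmt"
	"strings"
)

// matchItem is a stand-in for the RuleItem implementations used above.
type matchItem func(domain string, port uint16) bool

type options struct {
	DomainSuffix []string
	Port         []uint16
}

type rule struct {
	items  []matchItem
	invert bool
}

// newRule turns each non-empty option group into one match item.
func newRule(opts options) *rule {
	r := &rule{}
	if len(opts.DomainSuffix) > 0 {
		suffixes := opts.DomainSuffix
		r.items = append(r.items, func(domain string, _ uint16) bool {
			for _, suffix := range suffixes {
				if strings.HasSuffix(domain, suffix) {
					return true
				}
			}
			return false
		})
	}
	if len(opts.Port) > 0 {
		ports := opts.Port
		r.items = append(r.items, func(_ string, port uint16) bool {
			for _, p := range ports {
				if p == port {
					return true
				}
			}
			return false
		})
	}
	return r
}

// match requires every configured item to match, then applies invert.
func (r *rule) match(domain string, port uint16) bool {
	for _, item := range r.items {
		if !item(domain, port) {
			return r.invert
		}
	}
	return !r.invert
}

func main() {
	r := newRule(options{DomainSuffix: []string{".example.org"}, Port: []uint16{443}})
	fmt.Println(r.match("www.example.org", 443)) // true
	fmt.Println(r.match("www.example.org", 80))  // false
}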
@@ -31,7 +31,7 @@ func NewIPCIDRItem(isSource bool, prefixStrings []string) (*IPCIDRItem, error) {
			builder.Add(addr)
			continue
		}
-		return nil, E.Cause(err, "parse ip_cidr [", i, "]")
+		return nil, E.Cause(err, "parse [", i, "]")
	}
	var description string
	if isSource {
@@ -57,8 +57,23 @@ func NewIPCIDRItem(isSource bool, prefixStrings []string) (*IPCIDRItem, error) {
	}, nil
}

+func NewRawIPCIDRItem(isSource bool, ipSet *netipx.IPSet) *IPCIDRItem {
+	var description string
+	if isSource {
+		description = "source_ipcidr="
+	} else {
+		description = "ipcidr="
+	}
+	description += "<binary>"
+	return &IPCIDRItem{
+		ipSet:       ipSet,
+		isSource:    isSource,
+		description: description,
+	}
+}
+
func (r *IPCIDRItem) Match(metadata *adapter.InboundContext) bool {
-	if r.isSource {
+	if r.isSource || metadata.QueryType != 0 || metadata.IPCIDRMatchSource {
		return r.ipSet.Contains(metadata.Source.Addr)
	} else {
		if metadata.Destination.IsIP() {
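NewRawIPCIDRItem takes a prebuilt *netipx.IPSet (as produced when a binary rule-set is decoded) instead of prefix strings. A minimal sketch of building and querying such a set, assuming the go4.org/netipx package that provides IPSet and IPSetBuilder:

package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
)

func main() {
	var builder netipx.IPSetBuilder
	builder.AddPrefix(netip.MustParsePrefix("10.0.0.0/8"))
	builder.Add(netip.MustParseAddr("192.0.2.1"))
	ipSet, err := builder.IPSet()
	if err != nil {
		panic(err)
	}
	// IPSet.Contains is what IPCIDRItem.Match calls on the source or destination address.
	fmt.Println(ipSet.Contains(netip.MustParseAddr("10.1.2.3")))    // true
	fmt.Println(ipSet.Contains(netip.MustParseAddr("203.0.113.9"))) // false
}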
@@ -43,6 +43,13 @@ func NewDomainItem(domains []string, domainSuffixes []string) *DomainItem {
	}
}

+func NewRawDomainItem(matcher *domain.Matcher) *DomainItem {
+	return &DomainItem{
+		matcher,
+		"domain/domain_suffix=<binary>",
+	}
+}
+
func (r *DomainItem) Match(metadata *adapter.InboundContext) bool {
	var domainHost string
	if metadata.Domain != "" {
55  route/rule_item_rule_set.go  Normal file
@@ -0,0 +1,55 @@
package route

import (
	"strings"

	"github.com/sagernet/sing-box/adapter"
	E "github.com/sagernet/sing/common/exceptions"
	F "github.com/sagernet/sing/common/format"
)

var _ RuleItem = (*RuleSetItem)(nil)

type RuleSetItem struct {
	router            adapter.Router
	tagList           []string
	setList           []adapter.HeadlessRule
	ipcidrMatchSource bool
}

func NewRuleSetItem(router adapter.Router, tagList []string, ipCIDRMatchSource bool) *RuleSetItem {
	return &RuleSetItem{
		router:            router,
		tagList:           tagList,
		ipcidrMatchSource: ipCIDRMatchSource,
	}
}

func (r *RuleSetItem) Start() error {
	for _, tag := range r.tagList {
		ruleSet, loaded := r.router.RuleSet(tag)
		if !loaded {
			return E.New("rule-set not found: ", tag)
		}
		r.setList = append(r.setList, ruleSet)
	}
	return nil
}

func (r *RuleSetItem) Match(metadata *adapter.InboundContext) bool {
	metadata.IPCIDRMatchSource = r.ipcidrMatchSource
	for _, ruleSet := range r.setList {
		if ruleSet.Match(metadata) {
			return true
		}
	}
	return false
}

func (r *RuleSetItem) String() string {
	if len(r.tagList) == 1 {
		return F.ToString("rule_set=", r.tagList[0])
	} else {
		return F.ToString("rule_set=[", strings.Join(r.tagList, " "), "]")
	}
}
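A self-contained sketch of what RuleSetItem does, using hypothetical stand-in types instead of the adapter interfaces: tags are resolved to rule-set handles once at start time (so rule-sets can be declared in any order), and a match is the OR over the referenced sets:

package main

import (
	"errors"
	"fmt"
)

// ruleSet is a stand-in for the rule-set handle returned by the router above.
type ruleSet interface {
	Match(domain string) bool
}

type domainSet map[string]bool

func (s domainSet) Match(domain string) bool { return s[domain] }

type ruleSetItem struct {
	registry map[string]ruleSet // stand-in for router.RuleSet lookups
	tagList  []string
	setList  []ruleSet
}

// start resolves tags to sets once, so later matches are just a slice walk.
func (r *ruleSetItem) start() error {
	for _, tag := range r.tagList {
		set, loaded := r.registry[tag]
		if !loaded {
			return errors.New("rule-set not found: " + tag)
		}
		r.setList = append(r.setList, set)
	}
	return nil
}

func (r *ruleSetItem) match(domain string) bool {
	for _, set := range r.setList {
		if set.Match(domain) {
			return true
		}
	}
	return false
}

func main() {
	item := &ruleSetItem{
		registry: map[string]ruleSet{"ads": domainSet{"ads.example.com": true}},
		tagList:  []string{"ads"},
	}
	if err := item.start(); err != nil {
		panic(err)
	}
	fmt.Println(item.match("ads.example.com")) // true
	fmt.Println(item.match("example.com"))     // false
}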
67  route/rule_set.go  Normal file
@@ -0,0 +1,67 @@
package route

import (
	"context"
	"net"
	"net/http"
	"sync"

	"github.com/sagernet/sing-box/adapter"
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/option"
	E "github.com/sagernet/sing/common/exceptions"
	"github.com/sagernet/sing/common/logger"
	M "github.com/sagernet/sing/common/metadata"
	N "github.com/sagernet/sing/common/network"
)

func NewRuleSet(ctx context.Context, router adapter.Router, logger logger.ContextLogger, options option.RuleSet) (adapter.RuleSet, error) {
	switch options.Type {
	case C.RuleSetTypeLocal:
		return NewLocalRuleSet(router, options)
	case C.RuleSetTypeRemote:
		return NewRemoteRuleSet(ctx, router, logger, options), nil
	default:
		return nil, E.New("unknown rule set type: ", options.Type)
	}
}

var _ adapter.RuleSetStartContext = (*RuleSetStartContext)(nil)

type RuleSetStartContext struct {
	access          sync.Mutex
	httpClientCache map[string]*http.Client
}

func NewRuleSetStartContext() *RuleSetStartContext {
	return &RuleSetStartContext{
		httpClientCache: make(map[string]*http.Client),
	}
}

func (c *RuleSetStartContext) HTTPClient(detour string, dialer N.Dialer) *http.Client {
	c.access.Lock()
	defer c.access.Unlock()
	if httpClient, loaded := c.httpClientCache[detour]; loaded {
		return httpClient
	}
	httpClient := &http.Client{
		Transport: &http.Transport{
			ForceAttemptHTTP2:   true,
			TLSHandshakeTimeout: C.TCPTimeout,
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				return dialer.DialContext(ctx, network, M.ParseSocksaddr(addr))
			},
		},
	}
	c.httpClientCache[detour] = httpClient
	return httpClient
}

func (c *RuleSetStartContext) Close() {
	c.access.Lock()
	defer c.access.Unlock()
	for _, client := range c.httpClientCache {
		client.CloseIdleConnections()
	}
}
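RuleSetStartContext exists so that remote rule-sets sharing a download_detour also share one HTTP client while starting up. A self-contained sketch of the same cache-by-key pattern, with a plain dial function in place of N.Dialer and a fixed timeout in place of C.TCPTimeout:

package main

import (
	"context"
	"net"
	"net/http"
	"sync"
	"time"
)

type clientCache struct {
	access sync.Mutex
	cache  map[string]*http.Client
}

func newClientCache() *clientCache {
	return &clientCache{cache: make(map[string]*http.Client)}
}

// client returns one *http.Client per key, creating it on first use with a
// custom DialContext, mirroring RuleSetStartContext.HTTPClient above.
func (c *clientCache) client(key string, dial func(ctx context.Context, network, addr string) (net.Conn, error)) *http.Client {
	c.access.Lock()
	defer c.access.Unlock()
	if client, loaded := c.cache[key]; loaded {
		return client
	}
	client := &http.Client{
		Transport: &http.Transport{
			ForceAttemptHTTP2:   true,
			TLSHandshakeTimeout: 5 * time.Second, // stand-in for C.TCPTimeout
			DialContext:         dial,
		},
	}
	c.cache[key] = client
	return client
}

func (c *clientCache) close() {
	c.access.Lock()
	defer c.access.Unlock()
	for _, client := range c.cache {
		client.CloseIdleConnections()
	}
}

func main() {
	cache := newClientCache()
	dialer := &net.Dialer{}
	first := cache.client("proxy-out", dialer.DialContext)
	second := cache.client("proxy-out", dialer.DialContext)
	_ = first == second // true: the client is reused for the same detour key
	cache.close()
}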
82  route/rule_set_local.go  Normal file
@@ -0,0 +1,82 @@
package route

import (
	"context"
	"os"

	"github.com/sagernet/sing-box/adapter"
	"github.com/sagernet/sing-box/common/json"
	"github.com/sagernet/sing-box/common/srs"
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/option"
	E "github.com/sagernet/sing/common/exceptions"
)

var _ adapter.RuleSet = (*LocalRuleSet)(nil)

type LocalRuleSet struct {
	rules    []adapter.HeadlessRule
	metadata adapter.RuleSetMetadata
}

func NewLocalRuleSet(router adapter.Router, options option.RuleSet) (*LocalRuleSet, error) {
	setFile, err := os.Open(options.LocalOptions.Path)
	if err != nil {
		return nil, err
	}
	var plainRuleSet option.PlainRuleSet
	switch options.Format {
	case C.RuleSetFormatSource, "":
		var compat option.PlainRuleSetCompat
		decoder := json.NewDecoder(json.NewCommentFilter(setFile))
		decoder.DisallowUnknownFields()
		err = decoder.Decode(&compat)
		if err != nil {
			return nil, err
		}
		plainRuleSet = compat.Upgrade()
	case C.RuleSetFormatBinary:
		plainRuleSet, err = srs.Read(setFile, false)
		if err != nil {
			return nil, err
		}
	default:
		return nil, E.New("unknown rule set format: ", options.Format)
	}
	rules := make([]adapter.HeadlessRule, len(plainRuleSet.Rules))
	for i, ruleOptions := range plainRuleSet.Rules {
		rules[i], err = NewHeadlessRule(router, ruleOptions)
		if err != nil {
			return nil, E.Cause(err, "parse rule_set.rules.[", i, "]")
		}
	}
	var metadata adapter.RuleSetMetadata
	metadata.ContainsProcessRule = hasHeadlessRule(plainRuleSet.Rules, isProcessHeadlessRule)
	metadata.ContainsWIFIRule = hasHeadlessRule(plainRuleSet.Rules, isWIFIHeadlessRule)
	return &LocalRuleSet{rules, metadata}, nil
}

func (s *LocalRuleSet) Match(metadata *adapter.InboundContext) bool {
	for _, rule := range s.rules {
		if rule.Match(metadata) {
			return true
		}
	}
	return false
}

func (s *LocalRuleSet) StartContext(ctx context.Context, startContext adapter.RuleSetStartContext) error {
	return nil
}

func (s *LocalRuleSet) PostStart() error {
	return nil
}

func (s *LocalRuleSet) Metadata() adapter.RuleSetMetadata {
	return s.metadata
}

func (s *LocalRuleSet) Close() error {
	return nil
}
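A self-contained sketch of the format dispatch NewLocalRuleSet performs: a "source" rule-set is strict-decoded JSON, anything else goes to a binary reader. The types and the binary reader here are hypothetical stand-ins, not option.PlainRuleSet or srs.Read:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
)

// plainRuleSet is a stand-in for the decoded rule-set document.
type plainRuleSet struct {
	Version int               `json:"version"`
	Rules   []json.RawMessage `json:"rules"`
}

// readBinary is a placeholder for the .srs reader; the binary format is not shown here.
func readBinary(r io.Reader) (plainRuleSet, error) {
	return plainRuleSet{}, errors.New("binary format not implemented in this sketch")
}

func loadRuleSet(r io.Reader, format string) (plainRuleSet, error) {
	switch format {
	case "source", "":
		var set plainRuleSet
		decoder := json.NewDecoder(r)
		decoder.DisallowUnknownFields() // reject unexpected keys, as above
		if err := decoder.Decode(&set); err != nil {
			return plainRuleSet{}, err
		}
		return set, nil
	case "binary":
		return readBinary(r)
	default:
		return plainRuleSet{}, errors.New("unknown rule set format: " + format)
	}
}

func main() {
	source := `{"version": 1, "rules": [{"domain_suffix": [".example.com"]}]}`
	set, err := loadRuleSet(strings.NewReader(source), "source")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(set.Rules)) // 1
}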
264  route/rule_set_remote.go  Normal file
@@ -0,0 +1,264 @@
package route

import (
	"bytes"
	"context"
	"io"
	"net"
	"net/http"
	"runtime"
	"time"

	"github.com/sagernet/sing-box/adapter"
	"github.com/sagernet/sing-box/common/json"
	"github.com/sagernet/sing-box/common/srs"
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/option"
	E "github.com/sagernet/sing/common/exceptions"
	"github.com/sagernet/sing/common/logger"
	M "github.com/sagernet/sing/common/metadata"
	N "github.com/sagernet/sing/common/network"
	"github.com/sagernet/sing/service"
	"github.com/sagernet/sing/service/pause"
)

var _ adapter.RuleSet = (*RemoteRuleSet)(nil)

type RemoteRuleSet struct {
	ctx            context.Context
	cancel         context.CancelFunc
	router         adapter.Router
	logger         logger.ContextLogger
	options        option.RuleSet
	metadata       adapter.RuleSetMetadata
	updateInterval time.Duration
	dialer         N.Dialer
	rules          []adapter.HeadlessRule
	lastUpdated    time.Time
	lastEtag       string
	updateTicker   *time.Ticker
	pauseManager   pause.Manager
}

func NewRemoteRuleSet(ctx context.Context, router adapter.Router, logger logger.ContextLogger, options option.RuleSet) *RemoteRuleSet {
	ctx, cancel := context.WithCancel(ctx)
	var updateInterval time.Duration
	if options.RemoteOptions.UpdateInterval > 0 {
		updateInterval = time.Duration(options.RemoteOptions.UpdateInterval)
	} else {
		updateInterval = 24 * time.Hour
	}
	return &RemoteRuleSet{
		ctx:            ctx,
		cancel:         cancel,
		router:         router,
		logger:         logger,
		options:        options,
		updateInterval: updateInterval,
		pauseManager:   pause.ManagerFromContext(ctx),
	}
}

func (s *RemoteRuleSet) Match(metadata *adapter.InboundContext) bool {
	for _, rule := range s.rules {
		if rule.Match(metadata) {
			return true
		}
	}
	return false
}

func (s *RemoteRuleSet) StartContext(ctx context.Context, startContext adapter.RuleSetStartContext) error {
	var dialer N.Dialer
	if s.options.RemoteOptions.DownloadDetour != "" {
		outbound, loaded := s.router.Outbound(s.options.RemoteOptions.DownloadDetour)
		if !loaded {
			return E.New("download_detour not found: ", s.options.RemoteOptions.DownloadDetour)
		}
		dialer = outbound
	} else {
		outbound, err := s.router.DefaultOutbound(N.NetworkTCP)
		if err != nil {
			return err
		}
		dialer = outbound
	}
	s.dialer = dialer
	cacheFile := service.FromContext[adapter.CacheFile](s.ctx)
	if cacheFile != nil {
		if savedSet := cacheFile.LoadRuleSet(s.options.Tag); savedSet != nil {
			err := s.loadBytes(savedSet.Content)
			if err != nil {
				return E.Cause(err, "restore cached rule-set")
			}
			s.lastUpdated = savedSet.LastUpdated
			s.lastEtag = savedSet.LastEtag
		}
	}
	if s.lastUpdated.IsZero() {
		err := s.fetchOnce(ctx, startContext)
		if err != nil {
			return E.Cause(err, "initial rule-set: ", s.options.Tag)
		}
	}
	s.updateTicker = time.NewTicker(s.updateInterval)
	go s.loopUpdate()
	return nil
}

func (s *RemoteRuleSet) PostStart() error {
	if s.lastUpdated.IsZero() {
		err := s.fetchOnce(s.ctx, nil)
		if err != nil {
			s.logger.Error("fetch rule-set ", s.options.Tag, ": ", err)
		}
	}
	return nil
}

func (s *RemoteRuleSet) Metadata() adapter.RuleSetMetadata {
	return s.metadata
}

func (s *RemoteRuleSet) loadBytes(content []byte) error {
	var (
		plainRuleSet option.PlainRuleSet
		err          error
	)
	switch s.options.Format {
	case C.RuleSetFormatSource, "":
		var compat option.PlainRuleSetCompat
		decoder := json.NewDecoder(json.NewCommentFilter(bytes.NewReader(content)))
		decoder.DisallowUnknownFields()
		err = decoder.Decode(&compat)
		if err != nil {
			return err
		}
		plainRuleSet = compat.Upgrade()
	case C.RuleSetFormatBinary:
		plainRuleSet, err = srs.Read(bytes.NewReader(content), false)
		if err != nil {
			return err
		}
	default:
		return E.New("unknown rule set format: ", s.options.Format)
	}
	rules := make([]adapter.HeadlessRule, len(plainRuleSet.Rules))
	for i, ruleOptions := range plainRuleSet.Rules {
		rules[i], err = NewHeadlessRule(s.router, ruleOptions)
		if err != nil {
			return E.Cause(err, "parse rule_set.rules.[", i, "]")
		}
	}
	s.metadata.ContainsProcessRule = hasHeadlessRule(plainRuleSet.Rules, isProcessHeadlessRule)
	s.metadata.ContainsWIFIRule = hasHeadlessRule(plainRuleSet.Rules, isWIFIHeadlessRule)
	s.rules = rules
	return nil
}

func (s *RemoteRuleSet) loopUpdate() {
	if time.Since(s.lastUpdated) > s.updateInterval {
		err := s.fetchOnce(s.ctx, nil)
		if err != nil {
			s.logger.Error("fetch rule-set ", s.options.Tag, ": ", err)
		}
	}
	for {
		runtime.GC()
		select {
		case <-s.ctx.Done():
			return
		case <-s.updateTicker.C:
			s.pauseManager.WaitActive()
			err := s.fetchOnce(s.ctx, nil)
			if err != nil {
				s.logger.Error("fetch rule-set ", s.options.Tag, ": ", err)
			}
		}
	}
}

func (s *RemoteRuleSet) fetchOnce(ctx context.Context, startContext adapter.RuleSetStartContext) error {
	s.logger.Debug("updating rule-set ", s.options.Tag, " from URL: ", s.options.RemoteOptions.URL)
	var httpClient *http.Client
	if startContext != nil {
		httpClient = startContext.HTTPClient(s.options.RemoteOptions.DownloadDetour, s.dialer)
	} else {
		httpClient = &http.Client{
			Transport: &http.Transport{
				ForceAttemptHTTP2:   true,
				TLSHandshakeTimeout: C.TCPTimeout,
				DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
					return s.dialer.DialContext(ctx, network, M.ParseSocksaddr(addr))
				},
			},
		}
	}
	request, err := http.NewRequest("GET", s.options.RemoteOptions.URL, nil)
	if err != nil {
		return err
	}
	if s.lastEtag != "" {
		request.Header.Set("If-None-Match", s.lastEtag)
	}
	response, err := httpClient.Do(request.WithContext(ctx))
	if err != nil {
		return err
	}
	switch response.StatusCode {
	case http.StatusOK:
	case http.StatusNotModified:
		s.lastUpdated = time.Now()
		cacheFile := service.FromContext[adapter.CacheFile](s.ctx)
		if cacheFile != nil {
			savedRuleSet := cacheFile.LoadRuleSet(s.options.Tag)
			if savedRuleSet != nil {
				savedRuleSet.LastUpdated = s.lastUpdated
				err = cacheFile.SaveRuleSet(s.options.Tag, savedRuleSet)
				if err != nil {
					s.logger.Error("save rule-set updated time: ", err)
					return nil
				}
			}
		}
		s.logger.Info("update rule-set ", s.options.Tag, ": not modified")
		return nil
	default:
		return E.New("unexpected status: ", response.Status)
	}
	content, err := io.ReadAll(response.Body)
	if err != nil {
		response.Body.Close()
		return err
	}
	err = s.loadBytes(content)
	if err != nil {
		response.Body.Close()
		return err
	}
	response.Body.Close()
	eTagHeader := response.Header.Get("Etag")
	if eTagHeader != "" {
		s.lastEtag = eTagHeader
	}
	s.lastUpdated = time.Now()
	cacheFile := service.FromContext[adapter.CacheFile](s.ctx)
	if cacheFile != nil {
		err = cacheFile.SaveRuleSet(s.options.Tag, &adapter.SavedRuleSet{
			LastUpdated: s.lastUpdated,
			Content:     content,
			LastEtag:    s.lastEtag,
		})
		if err != nil {
			s.logger.Error("save rule-set cache: ", err)
		}
	}
	s.logger.Info("updated rule-set ", s.options.Tag)
	return nil
}

func (s *RemoteRuleSet) Close() error {
	s.updateTicker.Stop()
	s.cancel()
	return nil
}
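The remote updater's caching is plain HTTP revalidation: send If-None-Match with the stored ETag and treat 304 as "unchanged, refresh the timestamp only", otherwise replace the cached content and record the new ETag. A self-contained sketch of that flow (the URL is a placeholder):

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"
)

type cachedFetcher struct {
	client      *http.Client
	url         string
	lastEtag    string
	lastUpdated time.Time
	content     []byte
}

// fetchOnce mirrors the conditional-GET flow above: 200 replaces the cached
// content and records the new ETag, 304 only refreshes the timestamp.
func (f *cachedFetcher) fetchOnce(ctx context.Context) error {
	request, err := http.NewRequestWithContext(ctx, http.MethodGet, f.url, nil)
	if err != nil {
		return err
	}
	if f.lastEtag != "" {
		request.Header.Set("If-None-Match", f.lastEtag)
	}
	response, err := f.client.Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	switch response.StatusCode {
	case http.StatusOK:
	case http.StatusNotModified:
		f.lastUpdated = time.Now()
		return nil
	default:
		return fmt.Errorf("unexpected status: %s", response.Status)
	}
	content, err := io.ReadAll(response.Body)
	if err != nil {
		return err
	}
	f.content = content
	if etag := response.Header.Get("Etag"); etag != "" {
		f.lastEtag = etag
	}
	f.lastUpdated = time.Now()
	return nil
}

func main() {
	fetcher := &cachedFetcher{client: http.DefaultClient, url: "https://example.com/rule-set.srs"}
	if err := fetcher.fetchOnce(context.Background()); err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Println("fetched", len(fetcher.content), "bytes")
}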