5. Optimized DST to prevent unexpected crashes.

6. Fixed a bug in mapx's Keys() method that made the NAT-traversal bridge unstable.
arraykeys@gmail.com
2018-09-14 18:20:45 +08:00
parent e116bf8897
commit 85178223e0
13 changed files with 88 additions and 43 deletions

View File

@@ -7,6 +7,9 @@ v6.1
 4. Added the TCPS and TOU protocols to NAT traversal:
 TCPS transports over TCP with several selectable custom encryption schemes.
 TOU provides TCP over UDP, carrying TCP data over UDP with several selectable custom encryption schemes.
+5. Optimized DST to prevent unexpected crashes.
+6. Fixed a bug in mapx's Keys() method that made the NAT-traversal bridge unstable.
 v6.0 The enterprise edition is now open source
 This release open-sources the enterprise edition, merging its code into the current open-source goproxy, which remains free and open source under GPLv3.
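Most hunks below apply one recurring fix: the goroutine crash guards logged panics with fmt.Printf("crashed, err: %s\nstack:", ...), passing debug.Stack() without a matching %s verb, so the stack trace came out as a mangled %!(EXTRA string=...) artifact. A minimal standalone sketch of the corrected guard (crashGuard and the demo main are illustrative names, not from the repo):

package main

import (
    "fmt"
    "runtime/debug"
)

// crashGuard is the deferred recover pattern this commit installs in
// every background goroutine: it keeps a panic from killing the whole
// process and prints the stack with a matching %s verb.
func crashGuard() {
    if e := recover(); e != nil {
        fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
    }
}

func main() {
    done := make(chan struct{})
    go func() {
        defer close(done) // runs after crashGuard (deferred calls are LIFO)
        defer crashGuard()
        panic("boom") // simulated worker failure
    }()
    <-done
}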

View File

@@ -432,7 +432,7 @@ func initConfig() (err error) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         for {
@@ -456,7 +456,7 @@ func initConfig() (err error) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         for scanner.Scan() {
@@ -466,7 +466,7 @@ func initConfig() (err error) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         for scannerStdErr.Scan() {

View File

@@ -433,6 +433,12 @@ func (c *Conn) String() string {
 // Read can be made to time out and return a Error with Timeout() == true
 // after a fixed time limit; see SetDeadline and SetReadDeadline.
 func (c *Conn) Read(b []byte) (n int, err error) {
+    defer func() {
+        if e := recover(); e != nil {
+            n = 0
+            err = io.EOF
+        }
+    }()
     c.inbufMut.Lock()
     defer c.inbufMut.Unlock()
     for c.inbuf.Len() == 0 {
@@ -497,6 +503,9 @@ func (c *Conn) Write(b []byte) (n int, err error) {
 // Close closes the connection.
 // Any blocked Read or Write operations will be unblocked and return errors.
 func (c *Conn) Close() error {
+    defer func() {
+        _ = recover()
+    }()
     c.closeOnce.Do(func() {
         if debugConnection {
             log.Println(c, "explicit close start")
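The DST change above leans on a Go detail worth noting: a deferred recover can assign to named return values, so a panic inside Read's buffered path is converted into a clean (0, io.EOF) return instead of crashing the process, and Close simply swallows any panic from shutdown races. A self-contained sketch of the mechanism (read is an illustrative stand-in, not the dst.Conn method):

package main

import (
    "fmt"
    "io"
)

// read shows how a deferred recover rewrites named return values,
// the same mechanism the patched Conn.Read relies on.
func read(b []byte) (n int, err error) {
    defer func() {
        if e := recover(); e != nil {
            n = 0
            err = io.EOF // report the panic as a normal end-of-stream
        }
    }()
    panic("inbuf touched after close") // simulated internal failure
}

func main() {
    n, err := read(make([]byte, 16))
    fmt.Println(n, err) // prints: 0 EOF
}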

View File

@@ -82,7 +82,7 @@ func NewMux(conn net.PacketConn, packetSize int) *Mux {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         m.readerLoop()

View File

@@ -56,7 +56,7 @@ func newSendBuffer(m *Mux) *sendBuffer {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         b.writerLoop()

View File

@@ -60,7 +60,7 @@ func (s *DNS) InitService() (err error) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         for {
@@ -142,7 +142,7 @@ func (s *DNS) Start(args interface{}, log *logger.Logger) (err error) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         log.Printf("dns server on udp %s", *s.cfg.Local)

View File

@@ -183,7 +183,7 @@ func (s *HTTP) InitService() (err error) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         // loop to check ssh network connectivity

View File

@@ -131,7 +131,11 @@ func (s *MuxBridge) Start(args interface{}, log *logger.Logger) (err error) {
         return
     }
     s.sc = &sc
+    if *s.cfg.LocalType == "tou" {
+        s.log.Printf("%s bridge on %s", *s.cfg.LocalType, sc.UDPListener.LocalAddr())
+    } else {
         s.log.Printf("%s bridge on %s", *s.cfg.LocalType, (*sc.Listener).Addr())
+    }
     return
 }
 func (s *MuxBridge) Clean() {
@@ -211,20 +215,25 @@ func (s *MuxBridge) handler(inConn net.Conn) {
     index := keyInfo[1]
     s.l.Lock()
     defer s.l.Unlock()
+    var group *mapx.ConcurrentMap
     if !s.clientControlConns.Has(groupKey) {
-        item := mapx.NewConcurrentMap()
-        s.clientControlConns.Set(groupKey, &item)
-    }
-    _group, _ := s.clientControlConns.Get(groupKey)
-    group := _group.(*mapx.ConcurrentMap)
+        _g := mapx.NewConcurrentMap()
+        group = &_g
+        s.clientControlConns.Set(groupKey, group)
+        //s.log.Printf("init client session group %s", groupKey)
+    } else {
+        _group, _ := s.clientControlConns.Get(groupKey)
+        group = _group.(*mapx.ConcurrentMap)
+    }
     if v, ok := group.Get(index); ok {
         v.(*smux.Session).Close()
     }
     group.Set(index, session)
+    //s.log.Printf("set client session %s to group %s,grouplen:%d", index, groupKey, group.Count())
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         for {
@@ -236,10 +245,12 @@ func (s *MuxBridge) handler(inConn net.Conn) {
             defer s.l.Unlock()
             if sess, ok := group.Get(index); ok && sess.(*smux.Session).IsClosed() {
                 group.Remove(index)
+                //s.log.Printf("client session %s removed from group %s, grouplen:%d", key, groupKey, group.Count())
                 s.log.Printf("client connection %s released", key)
             }
             if group.IsEmpty() {
                 s.clientControlConns.Remove(groupKey)
+                //s.log.Printf("client session group %s removed", groupKey)
             }
             break
         }
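The handler rewrite above is a get-or-create under the bridge's mutex: the old code created the group and then unconditionally looked it up again, while the new code keeps a single *mapx.ConcurrentMap pointer in scope across both branches. A generic sketch of the same shape (group, groups, and getOrCreate are stand-ins, not repo types):

package main

import (
    "fmt"
    "sync"
)

type group struct{ sessions map[string]string }

var (
    mu     sync.Mutex
    groups = map[string]*group{}
)

// getOrCreate mirrors the handler: one branch creates and registers
// the group, the other fetches it, and both leave the same pointer
// in scope for the session bookkeeping that follows.
func getOrCreate(key string) *group {
    mu.Lock()
    defer mu.Unlock()
    g, ok := groups[key]
    if !ok {
        g = &group{sessions: map[string]string{}}
        groups[key] = g
    }
    return g
}

func main() {
    a := getOrCreate("user1")
    b := getOrCreate("user1")
    fmt.Println(a == b) // true: both callers share one instance
}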
@@ -263,6 +274,7 @@ func (s *MuxBridge) callback(inConn net.Conn, serverID, key string) {
         if key == "*" {
             key = s.router.GetKey()
         }
+        //s.log.Printf("server get client session %s", key)
         _group, ok := s.clientControlConns.Get(key)
         if !ok {
             s.log.Printf("client %s session not exists for server stream %s, retrying...", key, serverID)
@@ -270,8 +282,12 @@ func (s *MuxBridge) callback(inConn net.Conn, serverID, key string) {
             continue
         }
         group := _group.(*mapx.ConcurrentMap)
-        keys := group.Keys()
+        keys := []string{}
+        group.IterCb(func(key string, v interface{}) {
+            keys = append(keys, key)
+        })
         keysLen := len(keys)
+        //s.log.Printf("client session %s , len:%d , keysLen: %d", key, group.Count(), keysLen)
         i := 0
         if keysLen > 0 {
             i = rand.Intn(keysLen)
@@ -297,7 +313,7 @@ func (s *MuxBridge) callback(inConn net.Conn, serverID, key string) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         io.Copy(stream, inConn)
@@ -306,7 +322,7 @@ func (s *MuxBridge) callback(inConn net.Conn, serverID, key string) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         io.Copy(inConn, stream)
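The keys change in callback is the fix for item 6 of the commit message: rather than calling ConcurrentMap.Keys(), whose goroutine-and-channel fan-in implementation is what the commit message blames for the unstable bridge, the bridge now snapshots the keys itself with IterCb and picks a random live session. A sketch of that selection step, with ConcurrentMap reduced to a toy callback-iterable map (iterCbMap is illustrative, not the mapx type):

package main

import (
    "fmt"
    "math/rand"
)

// iterCbMap stands in for mapx.ConcurrentMap; the bridge only needs
// a callback-based iterator here.
type iterCbMap map[string]interface{}

func (m iterCbMap) IterCb(fn func(key string, v interface{})) {
    for k, v := range m {
        fn(k, v)
    }
}

func main() {
    sessions := iterCbMap{"key-0": nil, "key-1": nil, "key-2": nil}

    // Collect the keys via the iterator instead of Keys(), as the
    // patched callback() does.
    keys := []string{}
    sessions.IterCb(func(key string, v interface{}) {
        keys = append(keys, key)
    })

    // Pick a random client session to carry the new server stream.
    i := 0
    if len(keys) > 0 {
        i = rand.Intn(len(keys))
    }
    fmt.Println("chosen session:", keys[i])
}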

View File

@@ -8,6 +8,7 @@ import (
     "net"
     "runtime/debug"
     "strings"
+    "sync"
     "time"

     "github.com/golang/snappy"
@@ -145,7 +146,7 @@ func (s *MuxClient) Start(args interface{}, log *logger.Logger) (err error) {
         defer func() {
             e := recover()
             if e != nil {
-                s.log.Printf("session worker crashed: %s", e)
+                s.log.Printf("session worker crashed: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         for {
@@ -159,7 +160,16 @@ func (s *MuxClient) Start(args interface{}, log *logger.Logger) (err error) {
                 continue
             }
             conn.SetDeadline(time.Now().Add(time.Millisecond * time.Duration(*s.cfg.Timeout)))
+            g := sync.WaitGroup{}
+            g.Add(1)
+            go func() {
+                defer func() {
+                    _ = recover()
+                    g.Done()
+                }()
                 _, err = conn.Write(utils.BuildPacket(CONN_CLIENT, fmt.Sprintf("%s-%d", *s.cfg.Key, i)))
+            }()
+            g.Wait()
             conn.SetDeadline(time.Time{})
             if err != nil {
                 conn.Close()
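This hunk is what the new "sync" import is for: the handshake write moves into its own goroutine with a recover guard, and the worker blocks on a WaitGroup so the call stays synchronous. A panic out of the deadline-bounded Write can then no longer kill the session worker. A self-contained sketch of the shape (write is a stand-in for conn.Write):

package main

import (
    "fmt"
    "sync"
)

// write stands in for the deadline-bounded handshake write.
func write() (int, error) {
    panic("use of closed network connection") // simulated failure
}

func main() {
    var err error
    g := sync.WaitGroup{}
    g.Add(1)
    go func() {
        defer func() {
            _ = recover() // swallow a panic from the write path
            g.Done()
        }()
        _, err = write()
    }()
    g.Wait() // the WaitGroup keeps the caller synchronous
    // Note: if write panicked, err is still nil, as in the patch.
    fmt.Println("err:", err)
}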
@@ -316,14 +326,17 @@ func (s *MuxClient) ServeUDP(inConn *smux.Stream, localAddr, ID string) {
             item = v.(*ClientUDPConnItem)
         }
         (*item).touchtime = time.Now().Unix()
-        go (*item).udpConn.Write(body)
+        go func() {
+            defer func() { _ = recover() }()
+            (*item).udpConn.Write(body)
+        }()
     }
 }
 func (s *MuxClient) UDPRevecive(key, ID string) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         s.log.Printf("udp conn %s connected", ID)
@@ -353,7 +366,7 @@ func (s *MuxClient) UDPRevecive(key, ID string) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         cui.conn.SetWriteDeadline(time.Now().Add(time.Millisecond * time.Duration(*s.cfg.Timeout)))
@@ -372,7 +385,7 @@ func (s *MuxClient) UDPGCDeamon() {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         if s.isStop {
@@ -431,7 +444,7 @@ func (s *MuxClient) ServeConn(inConn *smux.Stream, localAddr, ID string) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         io.Copy(outConn, snappy.NewReader(inConn))
@@ -440,7 +453,7 @@ func (s *MuxClient) ServeConn(inConn *smux.Stream, localAddr, ID string) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         io.Copy(snappy.NewWriter(inConn), outConn)

View File

@@ -139,6 +139,10 @@ func (s *MuxServerManager) Start(args interface{}, log *logger.Logger) (err error) {
             KCP:        s.cfg.KCP,
             ParentType: s.cfg.ParentType,
             Jumper:     s.cfg.Jumper,
+            TCPSMethod:   s.cfg.TCPSMethod,
+            TCPSPassword: s.cfg.TCPSPassword,
+            TOUMethod:    s.cfg.TOUMethod,
+            TOUPassword:  s.cfg.TOUPassword,
         }, log)

         if err != nil {
@@ -299,7 +303,7 @@ func (s *MuxServer) Start(args interface{}, log *logger.Logger) (err error) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         io.Copy(inConn, snappy.NewReader(outConn))
@@ -308,7 +312,7 @@ func (s *MuxServer) Start(args interface{}, log *logger.Logger) (err error) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         io.Copy(snappy.NewWriter(outConn), inConn)
@@ -403,7 +407,7 @@ func (s *MuxServer) GetConn(index string) (conn net.Conn, err error) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         for {
@@ -475,7 +479,7 @@ func (s *MuxServer) UDPGCDeamon() {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         if s.isStop {
@@ -554,7 +558,7 @@ func (s *MuxServer) UDPRevecive(key, ID string) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         s.log.Printf("udp conn %s connected", ID)
@@ -587,7 +591,7 @@ func (s *MuxServer) UDPRevecive(key, ID string) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         s.sc.UDPListener.WriteToUDP(body, uc.srcAddr)

View File

@@ -256,7 +256,7 @@ func (s *TCP) UDPRevecive(key string) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         s.log.Printf("udp conn %s connected", key)
@@ -286,7 +286,7 @@ func (s *TCP) UDPRevecive(key string) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         (*cui.conn).SetWriteDeadline(time.Now().Add(time.Millisecond * time.Duration(*s.cfg.Timeout)))
@@ -305,7 +305,7 @@ func (s *TCP) UDPGCDeamon() {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         if s.isStop {

View File

@@ -155,7 +155,7 @@ func (m ConcurrentMap) Iter() <-chan Tuple {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         fanIn(chans, ch)
@@ -174,7 +174,7 @@ func (m ConcurrentMap) IterBuffered() <-chan Tuple {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         fanIn(chans, ch)
@@ -195,7 +195,7 @@ func snapshot(m ConcurrentMap) (chans []chan Tuple) {
     go func(index int, shard *ConcurrentMapShared) {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         // Foreach key, value pair.
@@ -221,7 +221,7 @@ func fanIn(chans []chan Tuple, out chan Tuple) {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         func(ch chan Tuple) {
@@ -274,7 +274,7 @@ func (m ConcurrentMap) Keys() []string {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         // Foreach shard.
@@ -284,7 +284,7 @@ func (m ConcurrentMap) Keys() []string {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         func(shard *ConcurrentMapShared) {

View File

@@ -90,7 +90,7 @@ func (c *Checker) start() {
     go func() {
         defer func() {
             if e := recover(); e != nil {
-                fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
             }
         }()
         //log.Printf("checker started")
@@ -100,7 +100,7 @@ func (c *Checker) start() {
         go func(item CheckerItem) {
             defer func() {
                 if e := recover(); e != nil {
-                    fmt.Printf("crashed, err: %s\nstack:", e, string(debug.Stack()))
+                    fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
                 }
             }()
             if c.isNeedCheck(item) {