From 698219b6218897241aef7e148e2e611f9b8c6ccf Mon Sep 17 00:00:00 2001
From: fatedier <fatedier@gmail.com>
Date: Mon, 16 Jul 2018 01:21:29 +0800
Subject: [PATCH 1/3] frpc: support health check

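Add a HealthCheckMonitor to frpc that periodically probes the local
service over TCP or HTTP and reports status transitions through a pair
of callbacks. A probe runs every interval seconds and is bounded by the
configured timeout; the service is reported as failed only after
maxFailedTimes consecutive failures, and reported as normal again on
the next success.

A rough usage sketch of the new API (illustrative only: the callback
bodies are placeholders, and wiring the monitor into the proxy
lifecycle lands in a later change):

    monitor := NewHealthCheckMonitor("http", 10, 3, 3,
        "127.0.0.1:80", "http://127.0.0.1:80/status",
        func() { /* service healthy again: resume the proxy */ },
        func() { /* service unhealthy: mark the proxy failed */ })
    monitor.Start()
    // ... later, on shutdown
    monitor.Stop()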
---
 client/control.go       |   1 +
 client/health.go        | 128 ++++++++++++++++++++++++++++++++++++++++--
 client/proxy_manager.go |   1 -
 models/config/proxy.go  |   2 +
 4 files changed, 127 insertions(+), 5 deletions(-)

diff --git a/client/control.go b/client/control.go
index 04be13ce..90d2d4ab 100644
--- a/client/control.go
+++ b/client/control.go
@@ -255,6 +255,7 @@ func (ctl *Control) login() (err error) {
 	return nil
 }
 
+// connectServer returns a new connection to frps
 func (ctl *Control) connectServer() (conn frpNet.Conn, err error) {
 	if g.GlbClientCfg.TcpMux {
 		stream, errRet := ctl.session.OpenStream()
diff --git a/client/health.go b/client/health.go
index ad58554d..8e84a6f8 100644
--- a/client/health.go
+++ b/client/health.go
@@ -15,18 +15,138 @@
 package client
 
 import (
-	"github.com/fatedier/frp/models/config"
+	"context"
+	"net"
+	"net/http"
+	"time"
 )
 
 type HealthCheckMonitor struct {
-	cfg config.HealthCheckConf
+	checkType      string
+	interval       time.Duration
+	timeout        time.Duration
+	maxFailedTimes int
+
+	// For tcp
+	addr string
+
+	// For http
+	url string
+
+	failedTimes    uint64
+	statusOK       bool
+	statusNormalFn func()
+	statusFailedFn func()
+
+	ctx    context.Context
+	cancel context.CancelFunc
 }
 
-func NewHealthCheckMonitor(cfg *config.HealthCheckConf) *HealthCheckMonitor {
+func NewHealthCheckMonitor(checkType string, intervalS int, timeoutS int, maxFailedTimes int, addr string, url string,
+	statusNormalFn func(), statusFailedFn func()) *HealthCheckMonitor {
+
+	if intervalS <= 0 {
+		intervalS = 10
+	}
+	if timeoutS <= 0 {
+		timeoutS = 3
+	}
+	if maxFailedTimes <= 0 {
+		maxFailedTimes = 1
+	}
+	ctx, cancel := context.WithCancel(context.Background())
 	return &HealthCheckMonitor{
-		cfg: *cfg,
+		checkType:      checkType,
+		interval:       time.Duration(intervalS) * time.Second,
+		timeout:        time.Duration(timeoutS) * time.Second,
+		maxFailedTimes: maxFailedTimes,
+		addr:           addr,
+		url:            url,
+		statusOK:       false,
+		statusNormalFn: statusNormalFn,
+		statusFailedFn: statusFailedFn,
+		ctx:            ctx,
+		cancel:         cancel,
 	}
 }
 
 func (monitor *HealthCheckMonitor) Start() {
+	go monitor.checkWorker()
+}
+
+func (monitor *HealthCheckMonitor) Stop() {
+	monitor.cancel()
+}
+
+func (monitor *HealthCheckMonitor) checkWorker() {
+	for {
+		ctx, cancel := context.WithDeadline(monitor.ctx, time.Now().Add(monitor.timeout))
+		ok := monitor.doCheck(ctx)
+
+		// check if this monitor has been stopped; the per-check deadline must not end the worker
+		select {
+		case <-monitor.ctx.Done():
+			cancel()
+			return
+		default:
+			cancel()
+		}
+
+		if ok {
+			// a successful check resets the consecutive failure counter
+			monitor.failedTimes = 0
+			if !monitor.statusOK && monitor.statusNormalFn != nil {
+				monitor.statusOK = true
+				monitor.statusNormalFn()
+			}
+		} else {
+			monitor.failedTimes++
+			if monitor.statusOK && int(monitor.failedTimes) >= monitor.maxFailedTimes && monitor.statusFailedFn != nil {
+				monitor.statusOK = false
+				monitor.statusFailedFn()
+			}
+		}
+
+		time.Sleep(monitor.interval)
+	}
+}
+
+func (monitor *HealthCheckMonitor) doCheck(ctx context.Context) bool {
+	switch monitor.checkType {
+	case "tcp":
+		return monitor.doTcpCheck(ctx)
+	case "http":
+		return monitor.doHttpCheck(ctx)
+	default:
+		return false
+	}
+}
+
+func (monitor *HealthCheckMonitor) doTcpCheck(ctx context.Context) bool {
+	var d net.Dialer
+	conn, err := d.DialContext(ctx, "tcp", monitor.addr)
+	if err != nil {
+		return false
+	}
+	conn.Close()
+	return true
+}
+
+func (monitor *HealthCheckMonitor) doHttpCheck(ctx context.Context) bool {
+	req, err := http.NewRequest("GET", monitor.url, nil)
+	if err != nil {
+		return false
+	}
+	// attach ctx so the configured timeout also cancels the HTTP request
+	req = req.WithContext(ctx)
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return false
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode/100 != 2 {
+		return false
+	}
+	return true
 }
diff --git a/client/proxy_manager.go b/client/proxy_manager.go
index cfa56fc5..bd193bb8 100644
--- a/client/proxy_manager.go
+++ b/client/proxy_manager.go
@@ -18,7 +18,6 @@ const (
 	ProxyStatusWaitStart    = "wait start"
 	ProxyStatusRunning      = "running"
 	ProxyStatusCheckFailed  = "check failed"
-	ProxyStatusCheckSuccess = "check success"
 	ProxyStatusClosed       = "closed"
 )
 
diff --git a/models/config/proxy.go b/models/config/proxy.go
index b600be5c..9ea680a4 100644
--- a/models/config/proxy.go
+++ b/models/config/proxy.go
@@ -381,6 +381,8 @@ func (cfg *LocalSvrConf) checkForCli() (err error) {
 // Health check info
 type HealthCheckConf struct {
 	HealthCheckType      string `json:"health_check_type"` // tcp | http
+	HealthCheckTimeout   int    `json:"health_check_timeout"`
+	HealthCheckMaxFailed int    `json:"health_check_max_failed"`
 	HealthCheckIntervalS int    `json:"health_check_interval_s"`
 	HealthCheckUrl       string `json:"health_check_url"`
 

From 1a8ac148ca8ff25b72b388e563b37fbbdfa4bffc Mon Sep 17 00:00:00 2001
From: fatedier <fatedier@gmail.com>
Date: Thu, 18 Oct 2018 13:55:51 +0800
Subject: [PATCH 2/3] fix xtcp visitor panic

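In XtcpVisitor.handleConn the errors returned by net.ResolveUDPAddr and
net.DialUDP were ignored, so a failed resolve or dial left the handler
running with a nil UDP connection that could panic further down the
path. Check both errors and return early instead.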
---
 client/proxy_manager.go | 12 ++++++------
 client/visitor.go       |  9 +++++++++
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/client/proxy_manager.go b/client/proxy_manager.go
index bd193bb8..fe175a05 100644
--- a/client/proxy_manager.go
+++ b/client/proxy_manager.go
@@ -13,12 +13,12 @@ import (
 )
 
 const (
-	ProxyStatusNew          = "new"
-	ProxyStatusStartErr     = "start error"
-	ProxyStatusWaitStart    = "wait start"
-	ProxyStatusRunning      = "running"
-	ProxyStatusCheckFailed  = "check failed"
-	ProxyStatusClosed       = "closed"
+	ProxyStatusNew         = "new"
+	ProxyStatusStartErr    = "start error"
+	ProxyStatusWaitStart   = "wait start"
+	ProxyStatusRunning     = "running"
+	ProxyStatusCheckFailed = "check failed"
+	ProxyStatusClosed      = "closed"
 )
 
 type ProxyManager struct {
diff --git a/client/visitor.go b/client/visitor.go
index 6e1e1c8d..66344019 100644
--- a/client/visitor.go
+++ b/client/visitor.go
@@ -202,7 +202,16 @@ func (sv *XtcpVisitor) handleConn(userConn frpNet.Conn) {
 
 	raddr, err := net.ResolveUDPAddr("udp",
 		fmt.Sprintf("%s:%d", g.GlbClientCfg.ServerAddr, g.GlbClientCfg.ServerUdpPort))
+	if err != nil {
+		sv.Error("resolve server UDP addr error: %v", err)
+		return
+	}
+
 	visitorConn, err := net.DialUDP("udp", nil, raddr)
+	if err != nil {
+		sv.Warn("dial server UDP addr error: %v", err)
+		return
+	}
 	defer visitorConn.Close()
 
 	now := time.Now().Unix()

From b33ea9274cdd263dd0a2ff9cf8f068606171d42a Mon Sep 17 00:00:00 2001
From: fatedier <fatedier@gmail.com>
Date: Tue, 6 Nov 2018 18:35:05 +0800
Subject: [PATCH 3/3] client/control: refactor code

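Move the login and reconnect logic out of Control and into Service.
Service now owns the login loop, creates a fresh Control for every
successful login, and watches Control.ClosedDoneCh() to reconnect with
exponential backoff (capped at 20 seconds). Control now only drives a
single control connection and signals full cleanup through
closedDoneCh. The ProxyWrapper/ProxyStatus code only moves within
proxy_manager.go.

The resulting lifecycle, sketched with error handling elided:

    conn, session, err := svr.login() // dial frps, exchange Login/LoginResp
    ctl := NewControl(svr.runId, conn, session, svr.pxyCfgs, svr.visitorCfgs)
    ctl.Run()            // start reader/writer/msgHandler and proxies
    <-ctl.ClosedDoneCh() // control connection closed and cleaned up
    // keepControllerWorking() then retries svr.login() with backoff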
---
 client/admin_api.go     |   2 +-
 client/control.go       | 213 ++++++------------------------------
 client/proxy.go         |   2 +-
 client/proxy_manager.go | 232 ++++++++++++++++++++--------------------
 client/service.go       | 185 ++++++++++++++++++++++++++++++--
 conf/frpc_full.ini      |   2 +-
 6 files changed, 323 insertions(+), 313 deletions(-)

diff --git a/client/admin_api.go b/client/admin_api.go
index 4eafa103..50745406 100644
--- a/client/admin_api.go
+++ b/client/admin_api.go
@@ -85,7 +85,7 @@ func (svr *Service) apiReload(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	err = svr.ctl.reloadConf(pxyCfgs, visitorCfgs)
+	err = svr.ReloadConf(pxyCfgs, visitorCfgs)
 	if err != nil {
 		res.Code = 4
 		res.Msg = err.Error()
diff --git a/client/control.go b/client/control.go
index 90d2d4ab..09ca2a02 100644
--- a/client/control.go
+++ b/client/control.go
@@ -17,8 +17,6 @@ package client
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
-	"runtime"
 	"runtime/debug"
 	"sync"
 	"time"
@@ -28,24 +26,15 @@ import (
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/utils/log"
 	frpNet "github.com/fatedier/frp/utils/net"
-	"github.com/fatedier/frp/utils/util"
-	"github.com/fatedier/frp/utils/version"
 
 	"github.com/fatedier/golib/control/shutdown"
 	"github.com/fatedier/golib/crypto"
 	fmux "github.com/hashicorp/yamux"
 )
 
-const (
-	connReadTimeout time.Duration = 10 * time.Second
-)
-
 type Control struct {
-	// frpc service
-	svr *Service
-
-	// login message to server, only used
-	loginMsg *msg.Login
+	// unique id obtained from frps, attached to each loginMsg
+	runId string
 
 	// manage all proxies
 	pm *ProxyManager
@@ -65,14 +54,10 @@ type Control struct {
 	// read from this channel to get the next message sent by server
 	readCh chan (msg.Message)
 
-	// run id got from server
-	runId string
-
-	// if we call close() in control, do not reconnect to server
-	exit bool
-
 	// goroutines can block by reading from this channel, it will be closed only in reader() when control connection is closed
-	closedCh chan int
+	closedCh chan struct{}
+
+	closedDoneCh chan struct{}
 
 	// last time got the Pong message
 	lastPong time.Time
@@ -86,50 +71,28 @@ type Control struct {
 	log.Logger
 }
 
-func NewControl(svr *Service, pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) *Control {
-	loginMsg := &msg.Login{
-		Arch:      runtime.GOARCH,
-		Os:        runtime.GOOS,
-		PoolCount: g.GlbClientCfg.PoolCount,
-		User:      g.GlbClientCfg.User,
-		Version:   version.Full(),
-	}
+func NewControl(runId string, conn frpNet.Conn, session *fmux.Session, pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) *Control {
 	ctl := &Control{
-		svr:                svr,
-		loginMsg:           loginMsg,
+		runId:              runId,
+		conn:               conn,
+		session:            session,
 		sendCh:             make(chan msg.Message, 100),
 		readCh:             make(chan msg.Message, 100),
-		closedCh:           make(chan int),
+		closedCh:           make(chan struct{}),
+		closedDoneCh:       make(chan struct{}),
 		readerShutdown:     shutdown.New(),
 		writerShutdown:     shutdown.New(),
 		msgHandlerShutdown: shutdown.New(),
 		Logger:             log.NewPrefixLogger(""),
 	}
-	ctl.pm = NewProxyManager(ctl, ctl.sendCh, "")
+	ctl.pm = NewProxyManager(ctl.sendCh, "")
 	ctl.pm.Reload(pxyCfgs, false)
 	ctl.vm = NewVisitorManager(ctl)
 	ctl.vm.Reload(visitorCfgs)
 	return ctl
 }
 
-func (ctl *Control) Run() (err error) {
-	for {
-		err = ctl.login()
-		if err != nil {
-			ctl.Warn("login to server failed: %v", err)
-
-			// if login_fail_exit is true, just exit this program
-			// otherwise sleep a while and continues relogin to server
-			if g.GlbClientCfg.LoginFailExit {
-				return
-			} else {
-				time.Sleep(10 * time.Second)
-			}
-		} else {
-			break
-		}
-	}
-
+func (ctl *Control) Run() {
 	go ctl.worker()
 
 	// start all local visitors and send NewProxy message for all configured proxies
@@ -137,7 +100,7 @@ func (ctl *Control) Run() (err error) {
 	ctl.pm.CheckAndStartProxy([]string{ProxyStatusNew})
 
 	go ctl.vm.Run()
-	return nil
+	return
 }
 
 func (ctl *Control) HandleReqWorkConn(inMsg *msg.ReqWorkConn) {
@@ -179,80 +142,13 @@ func (ctl *Control) HandleNewProxyResp(inMsg *msg.NewProxyResp) {
 }
 
 func (ctl *Control) Close() error {
-	ctl.mu.Lock()
-	defer ctl.mu.Unlock()
-	ctl.exit = true
 	ctl.pm.CloseProxies()
 	return nil
 }
 
-// login send a login message to server and wait for a loginResp message.
-func (ctl *Control) login() (err error) {
-	if ctl.conn != nil {
-		ctl.conn.Close()
-	}
-	if ctl.session != nil {
-		ctl.session.Close()
-	}
-
-	conn, err := frpNet.ConnectServerByProxy(g.GlbClientCfg.HttpProxy, g.GlbClientCfg.Protocol,
-		fmt.Sprintf("%s:%d", g.GlbClientCfg.ServerAddr, g.GlbClientCfg.ServerPort))
-	if err != nil {
-		return err
-	}
-
-	defer func() {
-		if err != nil {
-			conn.Close()
-		}
-	}()
-
-	if g.GlbClientCfg.TcpMux {
-		fmuxCfg := fmux.DefaultConfig()
-		fmuxCfg.LogOutput = ioutil.Discard
-		session, errRet := fmux.Client(conn, fmuxCfg)
-		if errRet != nil {
-			return errRet
-		}
-		stream, errRet := session.OpenStream()
-		if errRet != nil {
-			session.Close()
-			return errRet
-		}
-		conn = frpNet.WrapConn(stream)
-		ctl.session = session
-	}
-
-	now := time.Now().Unix()
-	ctl.loginMsg.PrivilegeKey = util.GetAuthKey(g.GlbClientCfg.Token, now)
-	ctl.loginMsg.Timestamp = now
-	ctl.loginMsg.RunId = ctl.runId
-
-	if err = msg.WriteMsg(conn, ctl.loginMsg); err != nil {
-		return err
-	}
-
-	var loginRespMsg msg.LoginResp
-	conn.SetReadDeadline(time.Now().Add(connReadTimeout))
-	if err = msg.ReadMsgInto(conn, &loginRespMsg); err != nil {
-		return err
-	}
-	conn.SetReadDeadline(time.Time{})
-
-	if loginRespMsg.Error != "" {
-		err = fmt.Errorf("%s", loginRespMsg.Error)
-		ctl.Error("%s", loginRespMsg.Error)
-		return err
-	}
-
-	ctl.conn = conn
-	// update runId got from server
-	ctl.runId = loginRespMsg.RunId
-	g.GlbClientCfg.ServerUdpPort = loginRespMsg.ServerUdpPort
-	ctl.ClearLogPrefix()
-	ctl.AddLogPrefix(loginRespMsg.RunId)
-	ctl.Info("login to server success, get run id [%s], server udp port [%d]", loginRespMsg.RunId, loginRespMsg.ServerUdpPort)
-	return nil
+// ClosedDoneCh returns a channel which will be closed after all resources are released
+func (ctl *Control) ClosedDoneCh() <-chan struct{} {
+	return ctl.closedDoneCh
 }
 
 // connectServer returns a new connection to frps
@@ -373,87 +269,38 @@ func (ctl *Control) msgHandler() {
 	}
 }
 
-// controler keep watching closedCh, start a new connection if previous control connection is closed.
-// If controler is notified by closedCh, reader and writer and handler will exit, then recall these functions.
+// If the controller is notified via closedCh, the reader, writer and msgHandler goroutines will exit
 func (ctl *Control) worker() {
 	go ctl.msgHandler()
 	go ctl.reader()
 	go ctl.writer()
 
-	var err error
-	maxDelayTime := 20 * time.Second
-	delayTime := time.Second
-
 	checkInterval := 60 * time.Second
 	checkProxyTicker := time.NewTicker(checkInterval)
+
 	for {
 		select {
 		case <-checkProxyTicker.C:
 			// check which proxy registered failed and reregister it to server
 			ctl.pm.CheckAndStartProxy([]string{ProxyStatusStartErr, ProxyStatusClosed})
-		case _, ok := <-ctl.closedCh:
-			// we won't get any variable from this channel
-			if !ok {
-				// close related channels and wait until other goroutines done
-				close(ctl.readCh)
-				ctl.readerShutdown.WaitDone()
-				ctl.msgHandlerShutdown.WaitDone()
+		case <-ctl.closedCh:
+			// close related channels and wait until other goroutines done
+			close(ctl.readCh)
+			ctl.readerShutdown.WaitDone()
+			ctl.msgHandlerShutdown.WaitDone()
 
-				close(ctl.sendCh)
-				ctl.writerShutdown.WaitDone()
+			close(ctl.sendCh)
+			ctl.writerShutdown.WaitDone()
 
-				ctl.pm.CloseProxies()
-				// if ctl.exit is true, just exit
-				ctl.mu.RLock()
-				exit := ctl.exit
-				ctl.mu.RUnlock()
-				if exit {
-					return
-				}
+			ctl.pm.CloseProxies()
 
-				// loop util reconnecting to server success
-				for {
-					ctl.Info("try to reconnect to server...")
-					err = ctl.login()
-					if err != nil {
-						ctl.Warn("reconnect to server error: %v", err)
-						time.Sleep(delayTime)
-						delayTime = delayTime * 2
-						if delayTime > maxDelayTime {
-							delayTime = maxDelayTime
-						}
-						continue
-					}
-					// reconnect success, init delayTime
-					delayTime = time.Second
-					break
-				}
-
-				// init related channels and variables
-				ctl.sendCh = make(chan msg.Message, 100)
-				ctl.readCh = make(chan msg.Message, 100)
-				ctl.closedCh = make(chan int)
-				ctl.readerShutdown = shutdown.New()
-				ctl.writerShutdown = shutdown.New()
-				ctl.msgHandlerShutdown = shutdown.New()
-				ctl.pm.Reset(ctl.sendCh, ctl.runId)
-
-				// previous work goroutines should be closed and start them here
-				go ctl.msgHandler()
-				go ctl.writer()
-				go ctl.reader()
-
-				// start all configured proxies
-				ctl.pm.CheckAndStartProxy([]string{ProxyStatusNew, ProxyStatusClosed})
-
-				checkProxyTicker.Stop()
-				checkProxyTicker = time.NewTicker(checkInterval)
-			}
+			close(ctl.closedDoneCh)
+			return
 		}
 	}
 }
 
-func (ctl *Control) reloadConf(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) error {
+func (ctl *Control) ReloadConf(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) error {
 	ctl.vm.Reload(visitorCfgs)
 	err := ctl.pm.Reload(pxyCfgs, true)
 	return err
diff --git a/client/proxy.go b/client/proxy.go
index 26c9a66e..a89921d2 100644
--- a/client/proxy.go
+++ b/client/proxy.go
@@ -35,7 +35,7 @@ import (
 	"github.com/fatedier/golib/pool"
 )
 
-// Proxy defines how to deal with work connections for different proxy type.
+// Proxy defines how to handle work connections for different proxy types.
 type Proxy interface {
 	Run() error
 
diff --git a/client/proxy_manager.go b/client/proxy_manager.go
index fe175a05..dc9f350d 100644
--- a/client/proxy_manager.go
+++ b/client/proxy_manager.go
@@ -22,7 +22,6 @@ const (
 )
 
 type ProxyManager struct {
-	ctl     *Control
 	sendCh  chan (msg.Message)
 	proxies map[string]*ProxyWrapper
 	closed  bool
@@ -31,122 +30,8 @@ type ProxyManager struct {
 	log.Logger
 }
 
-type ProxyWrapper struct {
-	Name   string
-	Type   string
-	Status string
-	Err    string
-	Cfg    config.ProxyConf
-
-	RemoteAddr string
-
-	pxy Proxy
-
-	mu sync.RWMutex
-}
-
-type ProxyStatus struct {
-	Name   string           `json:"name"`
-	Type   string           `json:"type"`
-	Status string           `json:"status"`
-	Err    string           `json:"err"`
-	Cfg    config.ProxyConf `json:"cfg"`
-
-	// Got from server.
-	RemoteAddr string `json:"remote_addr"`
-}
-
-func NewProxyWrapper(cfg config.ProxyConf) *ProxyWrapper {
-	return &ProxyWrapper{
-		Name:   cfg.GetBaseInfo().ProxyName,
-		Type:   cfg.GetBaseInfo().ProxyType,
-		Status: ProxyStatusNew,
-		Cfg:    cfg,
-		pxy:    nil,
-	}
-}
-
-func (pw *ProxyWrapper) GetStatusStr() string {
-	pw.mu.RLock()
-	defer pw.mu.RUnlock()
-	return pw.Status
-}
-
-func (pw *ProxyWrapper) GetStatus() *ProxyStatus {
-	pw.mu.RLock()
-	defer pw.mu.RUnlock()
-	ps := &ProxyStatus{
-		Name:       pw.Name,
-		Type:       pw.Type,
-		Status:     pw.Status,
-		Err:        pw.Err,
-		Cfg:        pw.Cfg,
-		RemoteAddr: pw.RemoteAddr,
-	}
-	return ps
-}
-
-func (pw *ProxyWrapper) WaitStart() {
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	pw.Status = ProxyStatusWaitStart
-}
-
-func (pw *ProxyWrapper) Start(remoteAddr string, serverRespErr string) error {
-	if pw.pxy != nil {
-		pw.pxy.Close()
-		pw.pxy = nil
-	}
-
-	if serverRespErr != "" {
-		pw.mu.Lock()
-		pw.Status = ProxyStatusStartErr
-		pw.RemoteAddr = remoteAddr
-		pw.Err = serverRespErr
-		pw.mu.Unlock()
-		return fmt.Errorf(serverRespErr)
-	}
-
-	pxy := NewProxy(pw.Cfg)
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	pw.RemoteAddr = remoteAddr
-	if err := pxy.Run(); err != nil {
-		pw.Status = ProxyStatusStartErr
-		pw.Err = err.Error()
-		return err
-	}
-	pw.Status = ProxyStatusRunning
-	pw.Err = ""
-	pw.pxy = pxy
-	return nil
-}
-
-func (pw *ProxyWrapper) InWorkConn(workConn frpNet.Conn) {
-	pw.mu.RLock()
-	pxy := pw.pxy
-	pw.mu.RUnlock()
-	if pxy != nil {
-		workConn.Debug("start a new work connection, localAddr: %s remoteAddr: %s", workConn.LocalAddr().String(), workConn.RemoteAddr().String())
-		go pxy.InWorkConn(workConn)
-	} else {
-		workConn.Close()
-	}
-}
-
-func (pw *ProxyWrapper) Close() {
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	if pw.pxy != nil {
-		pw.pxy.Close()
-		pw.pxy = nil
-	}
-	pw.Status = ProxyStatusClosed
-}
-
-func NewProxyManager(ctl *Control, msgSendCh chan (msg.Message), logPrefix string) *ProxyManager {
+func NewProxyManager(msgSendCh chan (msg.Message), logPrefix string) *ProxyManager {
 	return &ProxyManager{
-		ctl:     ctl,
 		proxies: make(map[string]*ProxyWrapper),
 		sendCh:  msgSendCh,
 		closed:  false,
@@ -309,3 +194,118 @@ func (pm *ProxyManager) GetAllProxyStatus() []*ProxyStatus {
 	}
 	return ps
 }
+
+type ProxyStatus struct {
+	Name   string           `json:"name"`
+	Type   string           `json:"type"`
+	Status string           `json:"status"`
+	Err    string           `json:"err"`
+	Cfg    config.ProxyConf `json:"cfg"`
+
+	// Got from server.
+	RemoteAddr string `json:"remote_addr"`
+}
+
+// ProxyWrapper is a wrapper of the Proxy interface, used only in ProxyManager.
+// It records additional proxy status info.
+type ProxyWrapper struct {
+	Name   string
+	Type   string
+	Status string
+	Err    string
+	Cfg    config.ProxyConf
+
+	RemoteAddr string
+
+	pxy Proxy
+
+	mu sync.RWMutex
+}
+
+func NewProxyWrapper(cfg config.ProxyConf) *ProxyWrapper {
+	return &ProxyWrapper{
+		Name:   cfg.GetBaseInfo().ProxyName,
+		Type:   cfg.GetBaseInfo().ProxyType,
+		Status: ProxyStatusNew,
+		Cfg:    cfg,
+		pxy:    nil,
+	}
+}
+
+func (pw *ProxyWrapper) GetStatusStr() string {
+	pw.mu.RLock()
+	defer pw.mu.RUnlock()
+	return pw.Status
+}
+
+func (pw *ProxyWrapper) GetStatus() *ProxyStatus {
+	pw.mu.RLock()
+	defer pw.mu.RUnlock()
+	ps := &ProxyStatus{
+		Name:       pw.Name,
+		Type:       pw.Type,
+		Status:     pw.Status,
+		Err:        pw.Err,
+		Cfg:        pw.Cfg,
+		RemoteAddr: pw.RemoteAddr,
+	}
+	return ps
+}
+
+func (pw *ProxyWrapper) WaitStart() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	pw.Status = ProxyStatusWaitStart
+}
+
+func (pw *ProxyWrapper) Start(remoteAddr string, serverRespErr string) error {
+	if pw.pxy != nil {
+		pw.pxy.Close()
+		pw.pxy = nil
+	}
+
+	if serverRespErr != "" {
+		pw.mu.Lock()
+		pw.Status = ProxyStatusStartErr
+		pw.RemoteAddr = remoteAddr
+		pw.Err = serverRespErr
+		pw.mu.Unlock()
+		return fmt.Errorf(serverRespErr)
+	}
+
+	pxy := NewProxy(pw.Cfg)
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	pw.RemoteAddr = remoteAddr
+	if err := pxy.Run(); err != nil {
+		pw.Status = ProxyStatusStartErr
+		pw.Err = err.Error()
+		return err
+	}
+	pw.Status = ProxyStatusRunning
+	pw.Err = ""
+	pw.pxy = pxy
+	return nil
+}
+
+func (pw *ProxyWrapper) InWorkConn(workConn frpNet.Conn) {
+	pw.mu.RLock()
+	pxy := pw.pxy
+	pw.mu.RUnlock()
+	if pxy != nil {
+		workConn.Debug("start a new work connection, localAddr: %s remoteAddr: %s", workConn.LocalAddr().String(), workConn.RemoteAddr().String())
+		go pxy.InWorkConn(workConn)
+	} else {
+		workConn.Close()
+	}
+}
+
+func (pw *ProxyWrapper) Close() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	if pw.pxy != nil {
+		pw.pxy.Close()
+		pw.pxy = nil
+	}
+	pw.Status = ProxyStatusClosed
+}
diff --git a/client/service.go b/client/service.go
index 2589f520..62cf1518 100644
--- a/client/service.go
+++ b/client/service.go
@@ -15,35 +15,85 @@
 package client
 
 import (
+	"fmt"
+	"io/ioutil"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+
 	"github.com/fatedier/frp/g"
 	"github.com/fatedier/frp/models/config"
+	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/utils/log"
+	frpNet "github.com/fatedier/frp/utils/net"
+	"github.com/fatedier/frp/utils/util"
+	"github.com/fatedier/frp/utils/version"
+
+	fmux "github.com/hashicorp/yamux"
 )
 
 type Service struct {
-	// manager control connection with server
-	ctl *Control
+	// unique id obtained from frps, attached to each loginMsg
+	runId string
 
+	// manages the control connection with the server
+	ctl   *Control
+	ctlMu sync.RWMutex
+
+	pxyCfgs     map[string]config.ProxyConf
+	visitorCfgs map[string]config.VisitorConf
+	cfgMu       sync.RWMutex
+
+	exit     uint32 // 0 means not exit
 	closedCh chan int
 }
 
 func NewService(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) (svr *Service) {
 	svr = &Service{
-		closedCh: make(chan int),
+		pxyCfgs:     pxyCfgs,
+		visitorCfgs: visitorCfgs,
+		exit:        0,
+		closedCh:    make(chan int),
 	}
-	ctl := NewControl(svr, pxyCfgs, visitorCfgs)
-	svr.ctl = ctl
 	return
 }
 
+func (svr *Service) GetController() *Control {
+	svr.ctlMu.RLock()
+	defer svr.ctlMu.RUnlock()
+	return svr.ctl
+}
+
 func (svr *Service) Run() error {
-	err := svr.ctl.Run()
-	if err != nil {
-		return err
+	// first login
+	for {
+		conn, session, err := svr.login()
+		if err != nil {
+			log.Warn("login to server failed: %v", err)
+
+			// if login_fail_exit is true, just exit this program
+			// otherwise sleep for a while and try to connect to the server again
+			if g.GlbClientCfg.LoginFailExit {
+				return err
+			} else {
+				time.Sleep(10 * time.Second)
+			}
+		} else {
+			// login success
+			ctl := NewControl(svr.runId, conn, session, svr.pxyCfgs, svr.visitorCfgs)
+			ctl.Run()
+			svr.ctlMu.Lock()
+			svr.ctl = ctl
+			svr.ctlMu.Unlock()
+			break
+		}
 	}
 
+	go svr.keepControllerWorking()
+
 	if g.GlbClientCfg.AdminPort != 0 {
-		err = svr.RunAdminServer(g.GlbClientCfg.AdminAddr, g.GlbClientCfg.AdminPort)
+		err := svr.RunAdminServer(g.GlbClientCfg.AdminAddr, g.GlbClientCfg.AdminPort)
 		if err != nil {
 			log.Warn("run admin server error: %v", err)
 		}
@@ -54,6 +104,119 @@ func (svr *Service) Run() error {
 	return nil
 }
 
-func (svr *Service) Close() {
-	svr.ctl.Close()
+func (svr *Service) keepControllerWorking() {
+	maxDelayTime := 20 * time.Second
+	delayTime := time.Second
+
+	for {
+		<-svr.ctl.ClosedDoneCh()
+		if atomic.LoadUint32(&svr.exit) != 0 {
+			return
+		}
+
+		for {
+			log.Info("try to reconnect to server...")
+			conn, session, err := svr.login()
+			if err != nil {
+				log.Warn("reconnect to server error: %v", err)
+				time.Sleep(delayTime)
+				delayTime = delayTime * 2
+				if delayTime > maxDelayTime {
+					delayTime = maxDelayTime
+				}
+				continue
+			}
+			// reconnect success, reset delayTime
+			delayTime = time.Second
+
+			ctl := NewControl(svr.runId, conn, session, svr.pxyCfgs, svr.visitorCfgs)
+			ctl.Run()
+			svr.ctlMu.Lock()
+			svr.ctl = ctl
+			svr.ctlMu.Unlock()
+			break
+		}
+	}
+}
+
+// login creates a connection to frps and registers itself as a client.
+// conn: the control connection
+// session: if not nil, tcp mux is used on this connection
+func (svr *Service) login() (conn frpNet.Conn, session *fmux.Session, err error) {
+	conn, err = frpNet.ConnectServerByProxy(g.GlbClientCfg.HttpProxy, g.GlbClientCfg.Protocol,
+		fmt.Sprintf("%s:%d", g.GlbClientCfg.ServerAddr, g.GlbClientCfg.ServerPort))
+	if err != nil {
+		return
+	}
+
+	defer func() {
+		if err != nil {
+			conn.Close()
+		}
+	}()
+
+	if g.GlbClientCfg.TcpMux {
+		fmuxCfg := fmux.DefaultConfig()
+		fmuxCfg.LogOutput = ioutil.Discard
+		session, err = fmux.Client(conn, fmuxCfg)
+		if err != nil {
+			return
+		}
+		stream, errRet := session.OpenStream()
+		if errRet != nil {
+			session.Close()
+			err = errRet
+			return
+		}
+		conn = frpNet.WrapConn(stream)
+	}
+
+	now := time.Now().Unix()
+	loginMsg := &msg.Login{
+		Arch:         runtime.GOARCH,
+		Os:           runtime.GOOS,
+		PoolCount:    g.GlbClientCfg.PoolCount,
+		User:         g.GlbClientCfg.User,
+		Version:      version.Full(),
+		PrivilegeKey: util.GetAuthKey(g.GlbClientCfg.Token, now),
+		Timestamp:    now,
+		RunId:        svr.runId,
+	}
+
+	if err = msg.WriteMsg(conn, loginMsg); err != nil {
+		return
+	}
+
+	var loginRespMsg msg.LoginResp
+	conn.SetReadDeadline(time.Now().Add(10 * time.Second))
+	if err = msg.ReadMsgInto(conn, &loginRespMsg); err != nil {
+		return
+	}
+	conn.SetReadDeadline(time.Time{})
+
+	if loginRespMsg.Error != "" {
+		err = fmt.Errorf("%s", loginRespMsg.Error)
+		log.Error("%s", loginRespMsg.Error)
+		return
+	}
+
+	svr.runId = loginRespMsg.RunId
+	g.GlbClientCfg.ServerUdpPort = loginRespMsg.ServerUdpPort
+	log.Info("login to server success, get run id [%s], server udp port [%d]", loginRespMsg.RunId, loginRespMsg.ServerUdpPort)
+	return
+}
+
+func (svr *Service) ReloadConf(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) error {
+	svr.cfgMu.Lock()
+	svr.pxyCfgs = pxyCfgs
+	svr.visitorCfgs = visitorCfgs
+	svr.cfgMu.Unlock()
+
+	return svr.GetController().ReloadConf(pxyCfgs, visitorCfgs)
+}
+
+func (svr *Service) Close() {
+	atomic.StoreUint32(&svr.exit, 1)
+	svr.GetController().Close()
+	close(svr.closedCh)
 }
diff --git a/conf/frpc_full.ini b/conf/frpc_full.ini
index 2307eeb3..d9892f54 100644
--- a/conf/frpc_full.ini
+++ b/conf/frpc_full.ini
@@ -25,7 +25,7 @@ token = 12345678
 admin_addr = 127.0.0.1
 admin_port = 7400
 admin_user = admin
-admin_passwd = admin
+admin_pwd = admin
 
 # connections will be established in advance, default value is zero
 pool_count = 5