Compare commits
98 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 8dda32a599 | |
| | a9ce3cf733 | |
| | e2a3b5f9ee | |
| | 7a752537c5 | |
| | abd0b63fe9 | |
| | 7ac7cd452b | |
| | 94cecdb8c0 | |
| | a8b35ba971 | |
| | bc1ab84b75 | |
| | acc895d2df | |
| | 23dbd0a92f | |
| | c069b5cd97 | |
| | f1dfe50d8b | |
| | 7d3820175f | |
| | 75032fdbb7 | |
| | 23b3ad63cf | |
| | 7afd0c86cd | |
| | 5f0a341d22 | |
| | 2a117376b7 | |
| | 2fc750532d | |
| | 477be63cff | |
| | 39b90357db | |
| | 2bd916eb73 | |
| | 4d1b450b33 | |
| | cb70812cb7 | |
| | 42e030e368 | |
| | f84cdc921d | |
| | 02c07e7f4f | |
| | d6ea190688 | |
| | b20487b928 | |
| | 004cf5693f | |
| | ef8de6feb0 | |
| | d791ebe634 | |
| | 5af4a1817e | |
| | e97b8c55f3 | |
| | 99fcb76210 | |
| | d4fd34165e | |
| | bd4741a0a0 | |
| | 5945c32646 | |
| | 9a9dc2594d | |
| | 02547e9475 | |
| | d81a823da1 | |
| | df74bcc885 | |
| | 094bcebfa3 | |
| | bb2a16720b | |
| | 86d9a0c0f3 | |
| | 5cf9d72ed3 | |
| | 801605676c | |
| | a9dec75e59 | |
| | 08d9d90fe1 | |
| | 9749db9235 | |
| | 11073aaaa5 | |
| | e35ddc4d53 | |
| | 99b06e813e | |
| | 7164349944 | |
| | bf43b3adee | |
| | 6aa4b3c8a9 | |
| | 7a9f7ef95e | |
| | ee1a9d3ec7 | |
| | 6a69e58be5 | |
| | 6e1d788677 | |
| | 24f8f789c5 | |
| | 2fb779f990 | |
| | 0a9d3cd309 | |
| | 977b1aba1c | |
| | a02aeeb906 | |
| | 7e2e63137e | |
| | 4b35219c27 | |
| | 0247c4701d | |
| | e2cd0b8e4f | |
| | ee93171c63 | |
| | ddd2302cb2 | |
| | c96d2288b3 | |
| | 6f5a088091 | |
| | 9a07797e29 | |
| | 055a020d33 | |
| | 4681ff3827 | |
| | cff92faf06 | |
| | 890daf5489 | |
| | 182bdeb766 | |
| | a4a953b167 | |
| | ff37b7e18c | |
| | 7aa0e78c15 | |
| | d798807693 | |
| | 35b78c2da6 | |
| | 66a4291c97 | |
| | e89a965aff | |
| | 85a9f10be4 | |
| | 8bc6e0ffec | |
| | 98fc0ade4a | |
| | f5626c21f7 | |
| | b5a76c7ff2 | |
| | 612bae4c93 | |
| | e45bf338cb | |
| | 577261806c | |
| | 8842097bd4 | |
| | a4b14dd0bd | |
| | 94f0142c7d | |
37
CHANGELOG
@ -1,4 +1,41 @@
proxy changelog

v4.2

1. Improved NAT forwarding so that a client going offline unexpectedly no longer leaves stale connection records behind.

2. Added SNI support to the HTTP proxy; the http(s) proxy mode now supports reverse proxying and transparent http(s) proxying.

3. Added an English manual.

v4.1

1. Improved the intelligent domain checking in the http(s) and socks5 proxies: intranet IPs now go directly over the local network, which improves the browsing experience, and the check mechanism was optimized so decisions are faster.

2. Basic authentication in the HTTP proxy now also covers the https protocol, so basic auth can control all http(s) traffic.

3. The project now ships a vendor directory with its dependency libraries, so a fresh clone can be built with go build directly, without worrying about go get failures breaking the build.

v4.0

1. The three NAT-forwarding components were rebuilt as a multiplexing version that uses github.com/xtaci/smux to multiplex TCP connections. The well-known kcp-go library is built on the same package, and the wide use of kcptun, the two-sided accelerator based on kcp-go, has proven how powerful and stable it is. The multiplexing subcommands are server, client and bridge; their usage and parameters are exactly the same as the previous tserver, tclient and tbridge subcommands. In addition, server and client gained a compression parameter --c; compressed transmission is faster.

v3.9

1. Added the supervisor parameter --forever, for example: proxy http --forever. proxy forks a child process and monitors it; if the child exits abnormally it is restarted after 5 seconds. Combined with the background parameter --daemon and the log parameter --log, this keeps proxy running in the background even after unexpected exits, and proxy's output can be read from the log file. For example: proxy http -p ":9090" --forever --log proxy.log --daemon

v3.8

1. Added the --log parameter to write logs to a file, for example --log proxy.log; the log is then written to proxy.log, which makes troubleshooting easier.

v3.7

1. Fixed an issue where the socks proxy could not communicate with the parent proxy properly.

v3.6

1. The http(s) and socks proxies integrate external HTTP API authentication; usernames and passwords can be verified through an external API.

2. Added usage instructions for the external HTTP API authentication to the http(s) and socks proxy authentication sections of the manual.

v3.5

1. Tuned the KCP parameters; speed is improved.

2. Fixed an issue where socks did not work properly.
133
Godeps/Godeps.json
generated
Normal file
@ -0,0 +1,133 @@
{
  "ImportPath": "proxy",
  "GoVersion": "go1.8",
  "GodepVersion": "v79",
  "Packages": [
    "./..."
  ],
  "Deps": [
    {
      "ImportPath": "github.com/alecthomas/template",
      "Rev": "a0175ee3bccc567396460bf5acd36800cb10c49c"
    },
    {
      "ImportPath": "github.com/alecthomas/template/parse",
      "Rev": "a0175ee3bccc567396460bf5acd36800cb10c49c"
    },
    {
      "ImportPath": "github.com/alecthomas/units",
      "Rev": "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
    },
    {
      "ImportPath": "github.com/golang/snappy",
      "Rev": "553a641470496b2327abcac10b36396bd98e45c9"
    },
    {
      "ImportPath": "github.com/pkg/errors",
      "Comment": "v0.8.0-6-g602255c",
      "Rev": "602255cdb6deaf1523ea53ac30eae5554ba7bee9"
    },
    {
      "ImportPath": "github.com/templexxx/cpufeat",
      "Rev": "3794dfbfb04749f896b521032f69383f24c3687e"
    },
    {
      "ImportPath": "github.com/templexxx/reedsolomon",
      "Comment": "0.1.1-4-g7092926",
      "Rev": "7092926d7d05c415fabb892b1464a03f8228ab80"
    },
    {
      "ImportPath": "github.com/templexxx/xor",
      "Comment": "0.1.2",
      "Rev": "0af8e873c554da75f37f2049cdffda804533d44c"
    },
    {
      "ImportPath": "github.com/tjfoc/gmsm/sm4",
      "Comment": "v1.0.1-3-g9d99fac",
      "Rev": "9d99face20b0dd300b7db50b3f69758de41c096a"
    },
    {
      "ImportPath": "github.com/xtaci/kcp-go",
      "Comment": "v3.19-6-g21da33a",
      "Rev": "21da33a6696d67c1bffb3c954366499d613097a6"
    },
    {
      "ImportPath": "github.com/xtaci/smux",
      "Comment": "v1.0.6",
      "Rev": "ebec7ef2574b42a7088cd7751176483e0a27d458"
    },
    {
      "ImportPath": "golang.org/x/crypto/blowfish",
      "Rev": "f899cbd3df85058aa20d1cf129473b18f2a2b49f"
    },
    {
      "ImportPath": "golang.org/x/crypto/cast5",
      "Rev": "86e16787bfd59cb4db9e278c51a95488c141a5d6"
    },
    {
      "ImportPath": "golang.org/x/crypto/curve25519",
      "Rev": "1843fabd21d7180cf65e36759986d00c13dbb0fd"
    },
    {
      "ImportPath": "golang.org/x/crypto/ed25519",
      "Rev": "1843fabd21d7180cf65e36759986d00c13dbb0fd"
    },
    {
      "ImportPath": "golang.org/x/crypto/ed25519/internal/edwards25519",
      "Rev": "1843fabd21d7180cf65e36759986d00c13dbb0fd"
    },
    {
      "ImportPath": "golang.org/x/crypto/pbkdf2",
      "Rev": "1843fabd21d7180cf65e36759986d00c13dbb0fd"
    },
    {
      "ImportPath": "golang.org/x/crypto/salsa20",
      "Rev": "1843fabd21d7180cf65e36759986d00c13dbb0fd"
    },
    {
      "ImportPath": "golang.org/x/crypto/salsa20/salsa",
      "Rev": "1843fabd21d7180cf65e36759986d00c13dbb0fd"
    },
    {
      "ImportPath": "golang.org/x/crypto/ssh",
      "Rev": "1843fabd21d7180cf65e36759986d00c13dbb0fd"
    },
    {
      "ImportPath": "golang.org/x/crypto/tea",
      "Rev": "1843fabd21d7180cf65e36759986d00c13dbb0fd"
    },
    {
      "ImportPath": "golang.org/x/crypto/twofish",
      "Rev": "1843fabd21d7180cf65e36759986d00c13dbb0fd"
    },
    {
      "ImportPath": "golang.org/x/crypto/xtea",
      "Rev": "1843fabd21d7180cf65e36759986d00c13dbb0fd"
    },
    {
      "ImportPath": "golang.org/x/net/bpf",
      "Rev": "114479435b31b5077a087cc5303a45cb5d355dc4"
    },
    {
      "ImportPath": "golang.org/x/net/internal/iana",
      "Rev": "114479435b31b5077a087cc5303a45cb5d355dc4"
    },
    {
      "ImportPath": "golang.org/x/net/internal/socket",
      "Rev": "114479435b31b5077a087cc5303a45cb5d355dc4"
    },
    {
      "ImportPath": "golang.org/x/net/ipv4",
      "Rev": "114479435b31b5077a087cc5303a45cb5d355dc4"
    },
    {
      "ImportPath": "golang.org/x/time/rate",
      "Rev": "8be79e1e0910c292df4e79c241bb7e8f7e725959"
    },
    {
      "ImportPath": "gopkg.in/alecthomas/kingpin.v2",
      "Comment": "v2.2.5",
      "Rev": "1087e65c9441605df944fb12c33f0fe7072d18ca"
    }
  ]
}
5
Godeps/Readme
generated
Normal file
@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
771
README.md
@ -1,125 +1,132 @@
|
||||
<img src="https://github.com/snail007/goproxy/blob/master/docs/images/logo.jpg?raw=true" width="200"/>
|
||||
Proxy是golang实现的高性能http,https,websocket,tcp,udp,socks5代理服务器,支持正向代理、内网穿透、SSH中转。
|
||||
Proxy is a high-performance HTTP, HTTPS, websocket, TCP, UDP and SOCKS5 proxy server implemented in golang. It supports parent proxies, NAT forwarding, TCP/UDP port forwarding and SSH transfer, so you can expose a local server behind a NAT or firewall to the internet.
|
||||
|
||||
---
|
||||
|
||||
[](https://github.com/snail007/goproxy/) []() [](https://github.com/snail007/goproxy/releases) [](https://github.com/snail007/goproxy/releases)
|
||||
|
||||
[中文手册](/README_ZH.md)
|
||||
|
||||
### Features
|
||||
- 链式代理,程序本身可以作为一级代理,如果设置了上级代理那么可以作为二级代理,乃至N级代理.
|
||||
- 通讯加密,如果程序不是一级代理,而且上级代理也是本程序,那么可以加密和上级代理之间的通讯,采用底层tls高强度加密,安全无特征.
|
||||
- 智能HTTP,SOCKS5代理,会自动判断访问的网站是否屏蔽,如果被屏蔽那么就会使用上级代理(前提是配置了上级代理)访问网站;如果访问的网站没有被屏蔽,为了加速访问,代理会直接访问网站,不使用上级代理.
|
||||
- 域名黑白名单,更加自由的控制网站的访问方式。
|
||||
- 跨平台性,无论你是widows,linux,还是mac,甚至是树莓派,都可以很好的运行proxy.
|
||||
- 多协议支持,支持HTTP(S),TCP,UDP,Websocket,SOCKS5代理.
|
||||
- 支持内网穿透,协议支持TCP和UDP.
|
||||
- SSH中转,HTTP(S),SOCKS5代理支持SSH中转,上级Linux服务器不需要任何服务端,本地一个proxy即可开心上网.
|
||||
- 支持[KCP](https://github.com/xtaci/kcp-go)协议,HTTP(S),SOCKS5代理支持KCP协议传输数据,降低延迟,提升浏览体验.
|
||||
- Chain-style proxies: the program itself can act as a first-level proxy, and if a parent proxy is set it can act as a second-level or even an N-level proxy.
- Encrypted communication: if the program is not a first-level proxy and the parent proxy is also this program, the communication with the parent proxy can be encrypted with strong low-level TLS, which is secure and has no identifiable traffic signature.
- Intelligent HTTP and SOCKS5 proxying: the program automatically determines whether the site being accessed is blocked; if it is, the parent proxy (assuming one is configured) is used to reach it, and if it is not, the site is accessed directly to speed things up.
- Domain black and white lists: flexible control over how each site is accessed.
- Cross-platform: no matter whether you run Linux, Windows or even a Raspberry Pi, proxy runs well.
- Multi-protocol support: HTTP(S), TCP, UDP, Websocket and SOCKS5 proxies.
- TCP/UDP port forwarding is supported.
- NAT forwarding across different networks is supported, over both TCP and UDP.
- SSH forwarding: the HTTP(S) and SOCKS5 proxies support SSH transfer; the parent Linux server needs no server software at all, and a single local proxy is enough to browse the Internet happily.
- [KCP](https://github.com/xtaci/kcp-go) protocol support: the HTTP(S) and SOCKS5 proxies can transmit data over the KCP protocol, reducing latency and improving the browsing experience.
- Integrated external API: HTTP(S) and SOCKS5 proxy authentication can be integrated with an external HTTP API, so user access can easily be controlled from an external system.
|
||||
|
||||
### Why need these?
|
||||
- 当由于某某原因,我们不能访问我们在其它地方的服务,我们可以通过多个相连的proxy节点建立起一个安全的隧道访问我们的服务.
|
||||
- 微信接口本地开发,方便调试.
|
||||
- 远程访问内网机器.
|
||||
- 和小伙伴一起玩局域网游戏.
|
||||
- 以前只能在局域网玩的,现在可以在任何地方玩.
|
||||
- 替代圣剑内网通,显IP内网通,花生壳之类的工具.
|
||||
- When, for some reason, we cannot reach our services hosted elsewhere, we can build a secure tunnel to them through multiple connected proxy nodes.
- Develop WeChat interfaces locally, which makes debugging convenient.
- Remote access to intranet machines.
- Play LAN games with friends.
- Games that used to work only on a LAN can now be played anywhere.
- A replacement for tools such as 圣剑内网通, 显IP内网通, 花生壳, frp and so on.
|
||||
- ...
|
||||
|
||||
|
||||
本页是v3.4手册,其他版本手册请点击下面链接查看.
|
||||
- [v3.3手册](https://github.com/snail007/goproxy/tree/v3.3)
|
||||
- [v3.2手册](https://github.com/snail007/goproxy/tree/v3.2)
|
||||
- [v3.1手册](https://github.com/snail007/goproxy/tree/v3.1)
|
||||
- [v3.0手册](https://github.com/snail007/goproxy/tree/v3.0)
|
||||
- [v2.x手册](https://github.com/snail007/goproxy/tree/v2.2)
|
||||
This page is the v4.0-v4.1 manual, and the other version of the manual can be checked by the following link.
|
||||
- [v3.9 manual](https://github.com/snail007/goproxy/tree/v3.9)
|
||||
- [v3.8 manual](https://github.com/snail007/goproxy/tree/v3.8)
|
||||
- [v3.6-v3.7 manual](https://github.com/snail007/goproxy/tree/v3.6)
|
||||
- [v3.5 manual](https://github.com/snail007/goproxy/tree/v3.5)
|
||||
- [v3.4 manual](https://github.com/snail007/goproxy/tree/v3.4)
|
||||
- [v3.3 manual](https://github.com/snail007/goproxy/tree/v3.3)
|
||||
- [v3.2 manual](https://github.com/snail007/goproxy/tree/v3.2)
|
||||
- [v3.1 manual](https://github.com/snail007/goproxy/tree/v3.1)
|
||||
- [v3.0 manual](https://github.com/snail007/goproxy/tree/v3.0)
|
||||
- [v2.x manual](https://github.com/snail007/goproxy/tree/v2.2)
|
||||
|
||||
### 安装
|
||||
1. [快速安装](#自动安装)
|
||||
1. [手动安装](#手动安装)
|
||||
### How to find the organization?
|
||||
[Click to join the communication organization](https://gitter.im/go-proxy/Lobby?utm_source=share-link&utm_medium=link&utm_campaign=share-link)
|
||||
|
||||
### 首次使用必看
|
||||
- [环境](#首次使用必看-1)
|
||||
- [使用配置文件](#使用配置文件)
|
||||
- [后台运行](#后台运行)
|
||||
- [生成通讯证书文件](#生成加密通讯需要的证书文件)
|
||||
- [安全建议](#安全建议)
|
||||
### Installation
|
||||
- [Quick installation](#quick-installation)
|
||||
- [Manual installation](#manual-installation)
|
||||
|
||||
### 手册目录
|
||||
- [1. HTTP代理](#1http代理)
|
||||
- [1.1 普通HTTP代理](#11普通http代理)
|
||||
- [1.2 普通二级HTTP代理](#12普通二级http代理)
|
||||
- [1.3 HTTP二级代理(加密)](#13http二级代理加密)
|
||||
- [1.4 HTTP三级代理(加密)](#14http三级代理加密)
|
||||
- [1.5 Basic认证](#15basic认证)
|
||||
- [1.6 强制走上级HTTP代理](#16http代理流量强制走上级http代理)
|
||||
- [1.7 通过SSH中转](#17https通过ssh中转)
|
||||
- [1.7.1 用户名和密码的方式](#171-ssh用户名和密码的方式)
|
||||
- [1.7.2 用户名和密钥的方式](#172-ssh用户名和密钥的方式)
|
||||
- [1.8 KCP协议传输](#18kcp协议传输)
|
||||
- [1.9 查看帮助](#19查看帮助)
|
||||
- [2. TCP代理](#2tcp代理)
|
||||
- [2.1 普通一级TCP代理](#21普通一级tcp代理)
|
||||
- [2.2 普通二级TCP代理](#22普通二级tcp代理)
|
||||
- [2.3 普通三级TCP代理](#23普通三级tcp代理)
|
||||
- [2.4 加密二级TCP代理](#24加密二级tcp代理)
|
||||
- [2.5 加密三级TCP代理](#25加密三级tcp代理)
|
||||
- [2.6 查看帮助](#26查看帮助)
|
||||
- [3. UDP代理](#3udp代理)
|
||||
- [3.1 普通一级TCP代理](#31普通一级udp代理)
|
||||
- [3.2 普通二级TCP代理](#32普通二级udp代理)
|
||||
- [3.3 普通三级TCP代理](#33普通三级udp代理)
|
||||
- [3.4 加密二级TCP代理](#34加密二级udp代理)
|
||||
- [3.5 加密三级TCP代理](#35加密三级udp代理)
|
||||
- [3.6 查看帮助](#36查看帮助)
|
||||
- [4. 内网穿透](#4内网穿透)
|
||||
- [4.1 原理说明](#41原理说明)
|
||||
- [4.2 TCP普通用法](#42tcp普通用法)
|
||||
- [4.3 微信接口本地开发](#43微信接口本地开发)
|
||||
- [4.4 UDP普通用法](#44udp普通用法)
|
||||
- [4.5 高级用法一](#45高级用法一)
|
||||
- [4.6 高级用法一](#46高级用法二)
|
||||
- [4.7 tserver的-r参数](#47tserver的-r参数)
|
||||
- [4.8 查看帮助](#48查看帮助)
|
||||
- [5. SOCKS5代理](#5socks5代理)
|
||||
- [5.1 普通SOCKS5代理](#51普通socks5代理)
|
||||
- [5.2 普通二级SOCKS5代理](#52普通二级socks5代理)
|
||||
- [5.3 SOCKS二级代理(加密)](#53socks二级代理加密)
|
||||
- [5.4 SOCKS三级代理(加密)](#54socks三级代理加密)
|
||||
- [5.5 流量强制走上级SOCKS代理](#55socks代理流量强制走上级socks代理)
|
||||
- [5.6 通过SSH中转](#56socks通过ssh中转)
|
||||
- [5.6.1 用户名和密码的方式](#561-ssh用户名和密码的方式)
|
||||
- [5.6.2 用户名和密钥的方式](#562-ssh用户名和密钥的方式)
|
||||
- [5.7 认证](#57认证)
|
||||
- [5.8 KCP协议传输](#58kcp协议传输)
|
||||
- [5.9 查看帮助](#59查看帮助)
|
||||
### First use must be read
|
||||
- [Environmental Science](#environmental-science)
|
||||
- [Use configuration file](#use-configuration-file)
|
||||
- [Debug output](#debug-output)
|
||||
- [Using log files](#using-log-files)
|
||||
- [Daemon mode](#daemon-mode)
|
||||
- [Monitor mode](#monitor-mode)
|
||||
- [Generating a communication certificate file](#generating-a-communication-certificate-file)
|
||||
- [Safety advice](#safety-advice)
|
||||
|
||||
### Manual catalogues
|
||||
- [1.HTTP proxy](#1http-proxy)
|
||||
- [1.1 Common HTTP proxy](#11common-http-proxy)
|
||||
- [1.2 Common HTTP second level proxy](#12common-http-second-level-proxy)
|
||||
- [1.3 HTTP second level proxy(encrypted)](#13http-second-level-encrypted-proxy)
|
||||
- [1.4 HTTP third level proxy(encrypted)](#14http-third-level-encrypted-proxy)
|
||||
- [1.5 Basic Authentication](#15basic-authentication)
|
||||
- [1.6 HTTP proxy traffic force to go to parent http proxy](#16http-proxy-traffic-force-to-go-to-parent-http-proxy)
|
||||
- [1.7 Transfer through SSH](#17transfer-through-ssh)
|
||||
- [1.7.1 The way of username and password](#171the-way-of-username-and-password)
|
||||
- [1.7.2 The way of username and key](#172the-way-of-username-and-key)
|
||||
- [1.8 KCP protocol transmission](#18kcp-protocol-transmission)
|
||||
- [1.9 View help](#19view-help)
|
||||
- [2.TCP proxy](#2tcp-proxy)
|
||||
- [2.1 Common TCP first level proxy](#21common-tcp-first-level-proxy)
|
||||
- [2.2 Common TCP second level proxy](#22common-tcp-second-level-proxy)
|
||||
- [2.3 Common TCP third level proxy](#23common-tcp-third-level-proxy)
|
||||
- [2.4 TCP second level encrypted proxy](#24tcp-second-level-encrypted-proxy)
|
||||
- [2.5 TCP third level encrypted proxy](#25tcp-third-level-encrypted-proxy)
|
||||
- [2.6 View help](#26view-help)
|
||||
- [3.UDP proxy](#3udp-proxy)
|
||||
- [3.1 Common UDP first level proxy](#31common-udp-first-level-proxy)
|
||||
- [3.2 Common UDP second level proxy](#32common-udp-second-level-proxy)
|
||||
- [3.3 Common UDP third level proxy](#33common-udp-third-level-proxy)
|
||||
- [3.4 UDP second level encrypted proxy](#34udp-second-level-encrypted-proxy)
|
||||
- [3.5 UDP third level encrypted proxy](#35udp-third-level-encrypted-proxy)
|
||||
- [3.6 View help](#36view-help)
|
||||
- [4.Nat forward](#4nat-forward)
|
||||
- [4.1 Principle explanation](#41principle-explanation)
|
||||
- [4.2 TCP common usage](#42tcp-common-usage)
|
||||
- [4.3 Local development of WeChat interface](#43local-development-of-wechat-interface)
|
||||
- [4.4 UDP common usage](#44udp-common-usage)
|
||||
- [4.5 Advanced usage 1](#45advanced-usage-1)
|
||||
- [4.6 Advanced usage 2](#46advanced-usage-2)
|
||||
- [4.7 -r parameters of server](#47-r-parameters-of-server)
|
||||
- [4.8 View help](#48view-help)
|
||||
- [5.SOCKS5 proxy](#5socks5-proxy)
|
||||
- [5.1 Common SOCKS5 proxy](#51common-socks5-proxy)
|
||||
- [5.2 Common SOCKS5 second level proxy](#52common-socks5-second-level-proxy)
|
||||
- [5.3 SOCKS5 second level proxy(encrypted)](#53socks-second-level-encrypted-proxy)
|
||||
- [5.4 SOCKS third level proxy(encrypted)](#54socks-third-level-encrypted-proxy)
|
||||
- [5.5 SOCKS proxy traffic force to go to parent socks proxy](#55socks-proxy-traffic-force-to-go-to-parent-socks-proxy)
|
||||
- [5.6 Transfer through SSH](#56transfer-through-ssh)
|
||||
- [5.6.1 The way of username and password](#561the-way-of-username-and-password)
|
||||
- [5.6.2 The way of username and key](#562the-way-of-username-and-key)
|
||||
- [5.7 Authentication](#57authentication)
|
||||
- [5.8 KCP protocol transmission](#58kcp-protocol-transmission)
|
||||
- [5.9 View help](#59view-help)
|
||||
|
||||
### Fast Start
|
||||
提示:所有操作需要root权限.
|
||||
#### 自动安装
|
||||
#### **0.如果你的VPS是linux64位的系统,那么只需要执行下面一句,就可以完成自动安装和配置.**
|
||||
Tips: all operations require root permissions.
|
||||
#### Quick installation
|
||||
#### **0. If your VPS is a linux64 system, the automatic installation and configuration can be completed by running the following command.**
|
||||
```shell
|
||||
curl -L https://raw.githubusercontent.com/snail007/goproxy/master/install_auto.sh | bash
|
||||
```
|
||||
安装完成,配置目录是/etc/proxy,更详细的使用方法参考下面的进一步了解.
|
||||
如果安装失败或者你的vps不是linux64位系统,请按照下面的半自动步骤安装:
|
||||
After installation the configuration directory is /etc/proxy; for more detailed usage, refer to the manual below.

If the installation fails, or your VPS is not a linux64 system, follow the semi-automatic steps below:
|
||||
|
||||
#### 手动安装
|
||||
#### **1.登录你的VPS,下载守护进程monexec,选择合适你的版本,vps一般选择"linux_amd64.tar.gz"的即可.**
|
||||
下载地址:https://github.com/reddec/monexec/releases
|
||||
比如下载到/root/proxy/
|
||||
执行:
|
||||
```shell
|
||||
mkdir /root/proxy/
|
||||
cd /root/proxy/
|
||||
wget https://github.com/reddec/monexec/releases/download/v0.1.1/monexec_0.1.1_linux_amd64.tar.gz
|
||||
```
|
||||
#### **2.下载proxy**
|
||||
下载地址:https://github.com/snail007/goproxy/releases
|
||||
#### Manual installation
|
||||
|
||||
#### **1.Download proxy**
|
||||
Download address: https://github.com/snail007/goproxy/releases
|
||||
```shell
|
||||
cd /root/proxy/
|
||||
wget https://github.com/snail007/goproxy/releases/download/v3.4/proxy-linux-amd64.tar.gz
|
||||
wget https://github.com/snail007/goproxy/releases/download/v4.0/proxy-linux-amd64.tar.gz
|
||||
```
|
||||
#### **3.下载自动安装脚本**
|
||||
#### **2.Download the automatic installation script**
|
||||
```shell
|
||||
cd /root/proxy/
|
||||
wget https://raw.githubusercontent.com/snail007/goproxy/master/install.sh
|
||||
@ -127,419 +134,469 @@ chmod +x install.sh
|
||||
./install.sh
|
||||
```
|
||||
|
||||
## 首次使用必看
|
||||
## **First use must be read**
|
||||
|
||||
#### **环境**
|
||||
接下来的教程,默认系统是linux,程序是proxy;所有操作需要root权限;
|
||||
如果你的是windows,请使用windows版本的proxy.exe即可.
|
||||
### **Environmental Science**
|
||||
In the following tutorial the default system is Linux and the program is proxy; all operations require root permissions.

If your system is Windows, use the Windows version, proxy.exe.
|
||||
|
||||
### **使用配置文件**
|
||||
接下来的教程都是通过命令行参数介绍使用方法,也可以通过读取配置文件获取参数.
|
||||
具体格式是通过@符号指定配置文件,例如:./proxy @configfile.txt
|
||||
configfile.txt里面的格式是,第一行是子命令名称,第二行开始一行一个:参数的长格式=参数值,前后不能有空格和双引号.
|
||||
参数的长格式都是--开头的,短格式参数都是-开头,如果你不知道某个短格式参数对应长格式参数,查看帮助命令即可.
|
||||
比如configfile.txt内容如下:
|
||||
### **Use configuration file**
|
||||
The following tutorial introduces usage through command-line parameters; the parameters can also be read from a configuration file.

The configuration file is specified with the @ symbol, for example: ./proxy @configfile.txt.

Format of configfile.txt: the first line is the subcommand name; from the second line on, each line holds one parameter as "long-parameter-name=value", with no spaces or double quotes around it.

Long parameter names always start with --, short ones with -. If you do not know which long parameter corresponds to a short one, check the help command.

For example, the contents of configfile.txt are as follows:
|
||||
```shell
|
||||
http
|
||||
--local-type=tcp
|
||||
--local=:33080
|
||||
```
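With such a file in place, the proxy can then be started from it using the @ syntax described above:

```shell
# equivalent to: ./proxy http --local-type=tcp --local=:33080
./proxy @configfile.txt
```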
|
||||
### 生成加密通讯需要的证书文件
|
||||
### **Debug output**
|
||||
By default, the log output does not contain file line numbers. In some cases, to locate program problems more easily, you can use the --debug parameter to output the code line number and the time of the error.
|
||||
|
||||
http,tcp,udp代理过程会和上级通讯,为了安全我们采用加密通讯,当然可以选择不加密通信通讯,本教程所有和上级通讯都采用加密,需要证书文件.
|
||||
在linux上并安装了openssl命令,可以直接通过下面的命令生成证书和key文件.
|
||||
### **Using log files**
|
||||
By default, the log is printed directly on the console; if you want to save it to a file, use the --log parameter.

For example, --log proxy.log: the log is then written to proxy.log, which makes troubleshooting easier.
|
||||
|
||||
### **Generating a communication certificate file**
|
||||
The HTTP, TCP and UDP proxies communicate with the parent proxy. For security we use encrypted communication (unencrypted communication is of course also possible). All communication with the parent proxy in this tutorial is encrypted, which requires certificate files.

With the openssl command installed on Linux, the certificate and key files can be generated directly with the following command.
|
||||
`./proxy keygen`
|
||||
默认会在当前程序目录下面生成证书文件proxy.crt和key文件proxy.key。
|
||||
By default, the certificate file proxy.crt and the key file proxy.key are generated under the current program directory.
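As a small illustration of the workflow described above: generate the files once, check that they exist, and then pass them to the -C and -K parameters used throughout this manual.

```shell
./proxy keygen
# proxy.crt and proxy.key are written to the current directory
ls proxy.crt proxy.key
```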
|
||||
|
||||
### 后台运行
|
||||
默认执行proxy之后,如果要保持proxy运行,不能关闭命令行.
|
||||
如果想在后台运行proxy,命令行可以关闭,只需要在命令最后加上--daemon参数即可.
|
||||
比如:
|
||||
`./proxy http -t tcp -p "0.0.0.0:38080" --daemon`
|
||||
更推荐用monexec守护运行proxy比较好.
|
||||
### **Daemon mode**
|
||||
By default, after starting proxy you cannot close the command line if you want to keep it running.

If you want to run proxy in the background so that the command line can be closed, just add the --daemon parameter at the end of the command.

For example: `./proxy http -t tcp -p "0.0.0.0:38080" --daemon`
|
||||
|
||||
### 安全建议
|
||||
当VPS在nat设备后面,vps上网卡IP都是内网IP,这个时候可以通过-g参数添加vps的外网ip防止死循环.
|
||||
假设你的vps外网ip是23.23.23.23,下面命令通过-g参数设置23.23.23.23
|
||||
### **Monitor mode**
|
||||
The monitor mode parameter is --forever, for example: `proxy http --forever`.
proxy forks a child process and then monitors it; if the child process exits abnormally, it is restarted after 5 seconds.
Together with the background parameter --daemon and the log parameter --log, this keeps proxy running in the background even if it exits unexpectedly,
and proxy's output can be read from the log file.

For example: `proxy http -p ":9090" --forever --log proxy.log --daemon`
|
||||
|
||||
### **Safety advice**
|
||||
When the VPS is behind a NAT device, the IP of the network card on the VPS is an internal IP; in that case you can add the VPS's external IP with the -g parameter to prevent an infinite loop.

Assuming your VPS's external IP is 23.23.23.23, the following command sets 23.23.23.23 through the -g parameter.
|
||||
`./proxy http -g "23.23.23.23"`
|
||||
|
||||
### 1.HTTP代理
|
||||
#### **1.1.普通HTTP代理**
|
||||
### **1.HTTP proxy**
|
||||
#### **1.1.common HTTP proxy**
|
||||
`./proxy http -t tcp -p "0.0.0.0:38080"`
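A quick way to check that the proxy answers, assuming curl is available and example.com stands in for any site:

```shell
# request a page through the freshly started HTTP proxy
curl -I -x http://127.0.0.1:38080 http://example.com
```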
|
||||
|
||||
#### **1.2.普通二级HTTP代理**
|
||||
使用本地端口8090,假设上级HTTP代理是`22.22.22.22:8080`
|
||||
#### **1.2.Common HTTP second level proxy**
|
||||
Using local port 8090, assume the parent HTTP proxy is: `22.22.22.22:8080`
|
||||
`./proxy http -t tcp -p "0.0.0.0:8090" -T tcp -P "22.22.22.22:8080" `
|
||||
默认关闭了连接池,如果要加快访问速度,-L可以开启连接池,10就是连接池大小,0为关闭,
|
||||
开启连接池在网络不好的情况下,稳定不是很好.
|
||||
The connection pool is disabled by default. If you want to speed up access, -L enables the connection pool: 10 is the pool size and 0 disables it.

The connection pool can be unstable when the network is poor.
|
||||
`./proxy http -t tcp -p "0.0.0.0:8090" -T tcp -P "22.22.22.22:8080" -L 10`
|
||||
我们还可以指定网站域名的黑白名单文件,一行一个域名,怕匹配规则是最右批评匹配,比如:baidu.com,匹配的是*.*.baidu.com,黑名单的域名域名直接走上级代理,白名单的域名不走上级代理.
|
||||
We can also specify black and white list files of domain names, one domain per line. Matching is right-most: for example, baidu.com matches *.*.baidu.com. Blacklisted domains always go through the parent proxy; whitelisted domains never go through the parent proxy.
|
||||
`./proxy http -p "0.0.0.0:8090" -T tcp -P "22.22.22.22:8080" -b blocked.txt -d direct.txt`
|
||||
|
||||
#### **1.3.HTTP二级代理(加密)**
|
||||
一级HTTP代理(VPS,IP:22.22.22.22)
|
||||
#### **1.3.HTTP second level encrypted proxy**
|
||||
HTTP first level proxy(VPS,IP:22.22.22.22)
|
||||
`./proxy http -t tls -p ":38080" -C proxy.crt -K proxy.key`
|
||||
|
||||
二级HTTP代理(本地Linux)
|
||||
HTTP second level proxy(local Linux)
|
||||
`./proxy http -t tcp -p ":8080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地的8080端口就是访问VPS上面的代理端口38080.
|
||||
Accessing the local port 8080 is then equivalent to accessing proxy port 38080 on the VPS.
|
||||
|
||||
二级HTTP代理(本地windows)
|
||||
HTTP second level proxy(local windows)
|
||||
`./proxy.exe http -t tcp -p ":8080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
然后设置你的windos系统中,需要通过代理上网的程序的代理为http模式,地址为:127.0.0.1,端口为:8080,程序即可通过加密通道通过vps上网。
|
||||
Then, in your Windows system, set the proxy of the programs that need to go online through the proxy to HTTP mode, address 127.0.0.1, port 8080; those programs will then reach the Internet through the encrypted channel via the VPS.
|
||||
|
||||
#### **1.4.HTTP三级代理(加密)**
|
||||
一级HTTP代理VPS_01,IP:22.22.22.22
|
||||
#### **1.4.HTTP third level encrypted proxy**
|
||||
HTTP first level proxy VPS_01,IP:22.22.22.22
|
||||
`./proxy http -t tls -p ":38080" -C proxy.crt -K proxy.key`
|
||||
二级HTTP代理VPS_02,IP:33.33.33.33
|
||||
HTTP second level proxy VPS_02,IP:33.33.33.33
|
||||
`./proxy http -t tls -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
三级HTTP代理(本地)
|
||||
HTTP third level proxy(local)
|
||||
`./proxy http -t tcp -p ":8080" -T tls -P "33.33.33.33:28080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地的8080端口就是访问一级HTTP代理上面的代理端口38080.
|
||||
Accessing the local port 8080 is then equivalent to accessing port 38080 on the first-level HTTP proxy.
|
||||
|
||||
#### **1.5.Basic认证**
|
||||
对于代理HTTP协议我们可以basic进行Basic认证,认证的用户名和密码可以在命令行指定
|
||||
#### **1.5.Basic Authentication**
|
||||
We can require Basic authentication for the HTTP proxy; the username and password can be specified on the command line.
|
||||
`./proxy http -t tcp -p ":33080" -a "user1:pass1" -a "user2:pass2"`
|
||||
多个用户,重复-a参数即可.
|
||||
也可以放在文件中,格式是一行一个"用户名:密码",然后用-F指定.
|
||||
If you need multiple users, repeat the -a parameter.

Credentials can also be placed in a file, one "username:password" per line, and then specified with -F.
|
||||
`./proxy http -t tcp -p ":33080" -F auth-file.txt`
|
||||
如果没有-a或-F参数,就是关闭Basic认证.
|
||||
|
||||
#### **1.6.HTTP代理流量强制走上级HTTP代理**
|
||||
默认情况下,proxy会智能判断一个网站域名是否无法访问,如果无法访问才走上级HTTP代理.通过--always可以使全部HTTP代理流量强制走上级HTTP代理.
|
||||
In addition, the HTTP(S) proxy integrates external HTTP API authentication; we can specify an HTTP URL interface address with the --auth-url parameter.

When someone connects to the proxy, the proxy issues a GET request to this URL with the four parameters below; if HTTP status code 204 is returned, authentication succeeds.

In any other case, authentication fails.
|
||||
for example:
|
||||
`./proxy http -t tcp -p ":33080" --auth-url "http://test.com/auth.php"`
|
||||
When a user connects, the proxy requests this URL ("http://test.com/auth.php") with a GET request,
carrying the four parameters user, pass, ip and target:
|
||||
http://test.com/auth.php?user={USER}&pass={PASS}&ip={IP}&target={TARGET}
|
||||
user:username
|
||||
pass:password
|
||||
ip:user's IP,for example: 192.168.1.200
|
||||
target:URL user connect to, for example: http://demo.com:80/1.html or https://www.baidu.com:80
|
||||
|
||||
If none of the -a, -F or --auth-url parameters is given, Basic authentication is disabled.
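As a sketch, the request the proxy sends can be reproduced with curl to try out an auth endpoint (all values below are placeholders); a printed 204 means the endpoint accepts the credentials:

```shell
curl -s -o /dev/null -w "%{http_code}\n" \
  "http://test.com/auth.php?user=user1&pass=pass1&ip=192.168.1.200&target=http://demo.com:80/1.html"
```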
|
||||
|
||||
#### **1.6.HTTP proxy traffic force to go to parent http proxy**
|
||||
By default, proxy intelligently judges whether a domain name is reachable; only if it is not does traffic go to the parent HTTP proxy.

With --always, all HTTP proxy traffic can be forced through the parent HTTP proxy.
|
||||
`./proxy http --always -t tls -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
|
||||
#### **1.7.HTTP(S)通过SSH中转**
|
||||
说明:ssh中转的原理是利用了ssh的转发功能,就是你连接上ssh之后,可以通过ssh代理访问目标地址.
|
||||
假设有:vps
|
||||
- IP是2.2.2.2, ssh端口是22, ssh用户名是:user, ssh用户密码是:demo
|
||||
- 用户user的ssh私钥名称是user.key
|
||||
#### **1.7.Transfer through SSH**
|
||||
Explanation: SSH transfer takes advantage of SSH's forwarding function; once connected to SSH, you can reach the target address through the SSH connection.
|
||||
Suppose there is a vps
|
||||
- IP is 2.2.2.2, ssh port is 22, ssh username is user, ssh password is demo
|
||||
- The SSH private key of the user is user.key
|
||||
|
||||
##### ***1.7.1 ssh用户名和密码的方式***
|
||||
本地HTTP(S)代理28080端口,执行:
|
||||
##### ***1.7.1.The way of username and password***
|
||||
The local HTTP(S) proxy listens on port 28080; execute:
|
||||
`./proxy http -T ssh -P "2.2.2.2:22" -u user -A demo -t tcp -p ":28080"`
|
||||
##### ***1.7.2 ssh用户名和密钥的方式***
|
||||
本地HTTP(S)代理28080端口,执行:
|
||||
##### ***1.7.2.The way of username and key***
|
||||
The local HTTP(S) proxy listens on port 28080; execute:
|
||||
`./proxy http -T ssh -P "2.2.2.2:22" -u user -S user.key -t tcp -p ":28080"`
|
||||
|
||||
#### **1.8.KCP协议传输**
|
||||
KCP协议需要-B参数设置一个密码用于加密解密数据
|
||||
#### **1.8.KCP protocol transmission**
|
||||
The KCP protocol requires the -B parameter to set a password used to encrypt and decrypt the data.
|
||||
|
||||
一级HTTP代理(VPS,IP:22.22.22.22)
|
||||
Http first level proxy(VPS,IP:22.22.22.22)
|
||||
`./proxy http -t kcp -p ":38080" -B mypassword`
|
||||
|
||||
二级HTTP代理(本地Linux)
|
||||
Http second level proxy(os is Linux)
|
||||
`./proxy http -t tcp -p ":8080" -T kcp -P "22.22.22.22:38080" -B mypassword`
|
||||
那么访问本地的8080端口就是访问VPS上面的代理端口38080,数据通过kcp协议传输.
|
||||
Then access to the local 8080 port is access to the proxy's port 38080 on the VPS, and the data is transmitted through the KCP protocol.
|
||||
|
||||
#### **1.9.查看帮助**
|
||||
#### **1.9.view help**
|
||||
`./proxy help http`
|
||||
|
||||
### 2.TCP代理
|
||||
### **2.TCP proxy**
|
||||
|
||||
#### **2.1.普通一级TCP代理**
|
||||
本地执行:
|
||||
#### **2.1.Common TCP first level proxy**
|
||||
Local execution:
|
||||
`./proxy tcp -p ":33080" -T tcp -P "192.168.22.33:22" -L 0`
|
||||
那么访问本地33080端口就是访问192.168.22.33的22端口.
|
||||
Accessing local port 33080 is then equivalent to accessing port 22 on 192.168.22.33.
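Assuming an SSH server really is listening on 192.168.22.33:22 and nc is installed, the forward can be checked from the local machine:

```shell
# the SSH banner of 192.168.22.33 should appear; press Ctrl+C to quit
nc 127.0.0.1 33080
```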
|
||||
|
||||
#### **2.2.普通二级TCP代理**
|
||||
VPS(IP:22.22.22.33)执行:
|
||||
`./proxy tcp -p ":33080" -T tcp -P "127.0.0.1:8080" -L 0`
|
||||
本地执行:
|
||||
#### **2.2.Common TCP second level proxy**
|
||||
VPS(IP:22.22.22.33) execute:
|
||||
`./proxy tcp -p ":33080" -T tcp -P "127.0.0.1:8080" -L 0`
|
||||
local execution:
|
||||
`./proxy tcp -p ":23080" -T tcp -P "22.22.22.33:33080"`
|
||||
那么访问本地23080端口就是访问22.22.22.33的8080端口.
|
||||
Accessing local port 23080 is then equivalent to accessing port 8080 on 22.22.22.33.
|
||||
|
||||
#### **2.3.普通三级TCP代理**
|
||||
一级TCP代理VPS_01,IP:22.22.22.22
|
||||
#### **2.3.Common TCP third level proxy**
|
||||
TCP first level proxy VPS_01,IP:22.22.22.22
|
||||
`./proxy tcp -p ":38080" -T tcp -P "66.66.66.66:8080" -L 0`
|
||||
二级TCP代理VPS_02,IP:33.33.33.33
|
||||
TCP second level proxy VPS_02,IP:33.33.33.33
|
||||
`./proxy tcp -p ":28080" -T tcp -P "22.22.22.22:38080"`
|
||||
三级TCP代理(本地)
|
||||
TCP third level proxy (local)
|
||||
`./proxy tcp -p ":8080" -T tcp -P "33.33.33.33:28080"`
|
||||
那么访问本地8080端口就是通过加密TCP隧道访问66.66.66.66的8080端口.
|
||||
Accessing local port 8080 is then equivalent to accessing port 8080 on 66.66.66.66 through the TCP tunnel.
|
||||
|
||||
#### **2.4.加密二级TCP代理**
|
||||
VPS(IP:22.22.22.33)执行:
|
||||
#### **2.4.TCP second level encrypted proxy**
|
||||
VPS(IP:22.22.22.33) execute:
|
||||
`./proxy tcp --tls -p ":33080" -T tcp -P "127.0.0.1:8080" -L 0 -C proxy.crt -K proxy.key`
|
||||
本地执行:
|
||||
local execution:
|
||||
`./proxy tcp -p ":23080" -T tls -P "22.22.22.33:33080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地23080端口就是通过加密TCP隧道访问22.22.22.33的8080端口.
|
||||
Accessing local port 23080 is then equivalent to accessing port 8080 on 22.22.22.33 through the encrypted TCP tunnel.
|
||||
|
||||
#### **2.5.加密三级TCP代理**
|
||||
一级TCP代理VPS_01,IP:22.22.22.22
|
||||
#### **2.5.TCP third level encrypted proxy**
|
||||
TCP first level proxy VPS_01,IP:22.22.22.22
|
||||
`./proxy tcp --tls -p ":38080" -T tcp -P "66.66.66.66:8080" -C proxy.crt -K proxy.key`
|
||||
二级TCP代理VPS_02,IP:33.33.33.33
|
||||
TCP second level proxy VPS_02,IP:33.33.33.33
|
||||
`./proxy tcp --tls -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
三级TCP代理(本地)
|
||||
TCP third level proxy (local)
|
||||
`./proxy tcp -p ":8080" -T tls -P "33.33.33.33:28080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地8080端口就是通过加密TCP隧道访问66.66.66.66的8080端口.
|
||||
Accessing local port 8080 is then equivalent to accessing port 8080 on 66.66.66.66 through the encrypted TCP tunnel.
|
||||
|
||||
#### **2.6.查看帮助**
|
||||
#### **2.6.view help**
|
||||
`./proxy help tcp`
|
||||
|
||||
### 3.UDP代理
|
||||
### **3.UDP proxy**
|
||||
|
||||
#### **3.1.普通一级UDP代理**
|
||||
本地执行:
|
||||
#### **3.1.Common UDP first level proxy**
|
||||
local execution:
|
||||
`./proxy udp -p ":5353" -T udp -P "8.8.8.8:53"`
|
||||
那么访问本地UDP:5353端口就是访问8.8.8.8的UDP:53端口.
|
||||
Accessing the local UDP port 5353 is then equivalent to accessing UDP port 53 on 8.8.8.8.
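Assuming dig is installed, the UDP forward can be checked by querying the local port directly:

```shell
# the answer comes from 8.8.8.8 through the local UDP proxy on port 5353
dig @127.0.0.1 -p 5353 example.com
```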
|
||||
|
||||
#### **3.2.普通二级UDP代理**
|
||||
VPS(IP:22.22.22.33)执行:
|
||||
#### **3.2.Common UDP second level proxy**
|
||||
VPS(IP:22.22.22.33) execute:
|
||||
`./proxy tcp -p ":33080" -T udp -P "8.8.8.8:53"`
|
||||
本地执行:
|
||||
local execution:
|
||||
`./proxy udp -p ":5353" -T tcp -P "22.22.22.33:33080"`
|
||||
那么访问本地UDP:5353端口就是通过TCP隧道,通过VPS访问8.8.8.8的UDP:53端口.
|
||||
Then access to the local UDP:5353 port is access to the UDP:53 port of the 8.8.8.8 through the TCP tunnel.
|
||||
|
||||
#### **3.3.普通三级UDP代理**
|
||||
一级TCP代理VPS_01,IP:22.22.22.22
|
||||
#### **3.3.Common UDP third level proxy**
|
||||
TCP first level proxy VPS_01,IP:22.22.22.22
|
||||
`./proxy tcp -p ":38080" -T udp -P "8.8.8.8:53"`
|
||||
二级TCP代理VPS_02,IP:33.33.33.33
|
||||
TCP second level proxy VPS_02,IP:33.33.33.33
|
||||
`./proxy tcp -p ":28080" -T tcp -P "22.22.22.22:38080"`
|
||||
三级TCP代理(本地)
|
||||
TCP third level proxy (local)
|
||||
`./proxy udp -p ":5353" -T tcp -P "33.33.33.33:28080"`
|
||||
那么访问本地5353端口就是通过TCP隧道,通过VPS访问8.8.8.8的53端口.
|
||||
Then access to the local 5353 port is access to the 53 port of the 8.8.8.8 through the TCP tunnel.
|
||||
|
||||
#### **3.4.加密二级UDP代理**
|
||||
VPS(IP:22.22.22.33)执行:
|
||||
#### **3.4.UDP second level encrypted proxy**
|
||||
VPS(IP:22.22.22.33) execute:
|
||||
`./proxy tcp --tls -p ":33080" -T udp -P "8.8.8.8:53" -C proxy.crt -K proxy.key`
|
||||
本地执行:
|
||||
local execution:
|
||||
`./proxy udp -p ":5353" -T tls -P "22.22.22.33:33080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地UDP:5353端口就是通过加密TCP隧道,通过VPS访问8.8.8.8的UDP:53端口.
|
||||
Accessing the local UDP port 5353 then reaches UDP port 53 on 8.8.8.8 through the encrypted TCP tunnel via the VPS.
|
||||
|
||||
#### **3.5.加密三级UDP代理**
|
||||
一级TCP代理VPS_01,IP:22.22.22.22
|
||||
#### **3.5.UDP third level encrypted proxy**
|
||||
TCP first level proxy VPS_01,IP:22.22.22.22
|
||||
`./proxy tcp --tls -p ":38080" -T udp -P "8.8.8.8:53" -C proxy.crt -K proxy.key`
|
||||
二级TCP代理VPS_02,IP:33.33.33.33
|
||||
TCP second level proxy VPS_02,IP:33.33.33.33
|
||||
`./proxy tcp --tls -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
三级TCP代理(本地)
|
||||
TCP third level proxy (local)
|
||||
`./proxy udp -p ":5353" -T tls -P "33.33.33.33:28080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地5353端口就是通过加密TCP隧道,通过VPS_01访问8.8.8.8的53端口.
|
||||
Accessing the local UDP port 5353 then reaches UDP port 53 on 8.8.8.8 through the encrypted TCP tunnel via VPS_01.
|
||||
|
||||
#### **3.6.查看帮助**
|
||||
#### **3.6.view help**
|
||||
`./proxy help udp`
|
||||
|
||||
### 4.内网穿透
|
||||
#### **4.1、原理说明**
|
||||
内网穿透,由三部分组成:client端,server端,bridge端;client和server主动连接bridge端进行桥接.
|
||||
当用户访问server端,流程是:
|
||||
1. server主动和bridge端建立连接;
|
||||
1. 然后bridge端通知client端连接bridge端,并连接内网目标端口;
|
||||
1. 然后绑定client端到bridge端和client端到内网端口的连接;
|
||||
1. 然后bridge端把client过来的连接与server端过来的连接绑定;
|
||||
1. 整个通道建立完成;
|
||||
### **4.Nat forward**
|
||||
#### **4.1、Principle explanation**
|
||||
NAT forwarding comes in two versions, a "multi-link version" and a "multiplexed version". For services that do not hold connections open for a long time, such as web services, the "multi-link version" is recommended; if you need to keep long-lived connections, the "multiplexed version" is recommended.

1. Multi-link version: the corresponding subcommands are tserver, tclient, tbridge.
1. Multiplexed version: the corresponding subcommands are server, client, bridge.
1. The parameters and usage of the multi-link and multiplexed versions are exactly the same.
1. **In the multiplexed version, server and client can enable compressed transmission; the parameter is --c.**
1. **Compression must be enabled on both server and client or on neither; it cannot be enabled on only one side.**
|
||||
|
||||
#### **4.2、TCP普通用法**
|
||||
背景:
|
||||
- 公司机器A提供了web服务80端口
|
||||
- 有VPS一个,公网IP:22.22.22.22
|
||||
The following tutorial uses the multiplexed version as an example to illustrate usage.

NAT forwarding consists of three parts: the client side, the server side and the bridge side; both client and server actively connect to the bridge.

When a user accesses the server side, the process is:

1. The server actively establishes a connection to the bridge;
1. The bridge then notifies the client to connect to the bridge and to connect to the intranet target port;
1. The client's connection to the bridge is then bound to the client's connection to the intranet port;
1. The bridge then binds the connection coming from the client to the connection coming from the server;
1. The whole tunnel is now established.
|
||||
|
||||
需求:
|
||||
在家里能够通过访问VPS的28080端口访问到公司机器A的80端口
|
||||
#### **4.2.TCP common usage**
|
||||
Background:
|
||||
- The company computer A provides the 80 port of the web service
|
||||
- There is one VPS, which public IP is 22.22.22.22
|
||||
|
||||
步骤:
|
||||
1. 在vps上执行
|
||||
`./proxy tbridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy tserver -r ":28080@:80" -P "127.0.0.1:33080" -C proxy.crt -K proxy.key`
|
||||
Demand:
|
||||
At home, you can reach port 80 on the company's computer A by accessing port 28080 on the VPS.
|
||||
|
||||
1. 在公司机器A上面执行
|
||||
`./proxy tclient -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
Procedure:
|
||||
1. Execute on VPS
|
||||
`./proxy bridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy server -r ":28080@:80" -P "127.0.0.1:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 完成
|
||||
1. Execute on the company's computer A
|
||||
`./proxy client -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
#### **4.3、微信接口本地开发**
|
||||
背景:
|
||||
- 自己的笔记本提供了nginx服务80端口
|
||||
- 有VPS一个,公网IP:22.22.22.22
|
||||
1. complete
|
||||
|
||||
需求:
|
||||
在微信的开发帐号的网页回调接口配置里面填写地址:http://22.22.22.22/calback.php
|
||||
然后就可以访问到笔记本的80端口下面的calback.php,如果需要绑定域名,可以用自己的域名
|
||||
比如:wx-dev.xxx.com解析到22.22.22.22,然后在自己笔记本的nginx里
|
||||
配置域名wx-dev.xxx.com到具体的目录即可.
|
||||
#### **4.3.Local development of WeChat interface**
|
||||
Background:
|
||||
- My own computer provides the 80 port of nginx service
|
||||
- There is one VPS, which public IP is 22.22.22.22
|
||||
|
||||
Demand:
|
||||
Fill out the Web callback interface configuration address of WeChat Development Account: http://22.22.22.22/calback.php
|
||||
Then you can access the calback.php under the 80 port of the computer, and if you need to bind the domain name, you can use your own domain name.
|
||||
for example: Wx-dev.xxx.com is resolved to 22.22.22.22, and then configure the domain name wx-dev.xxx.com into a specific directory in the nginx of your own computer.
|
||||
|
||||
|
||||
步骤:
|
||||
1. 在vps上执行,确保vps的80端口没被其它程序占用.
|
||||
`./proxy tbridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy tserver -r ":80@:80" -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
Procedure:
|
||||
1. Execute on VPS and ensure that the 80 port of VPS is not occupied by other programs.
|
||||
`./proxy bridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy server -r ":80@:80" -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 在自己笔记本上面执行
|
||||
`./proxy tclient -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
1. Execute it on your own computer
|
||||
`./proxy client -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 完成
|
||||
1. complete
|
||||
|
||||
#### **4.4、UDP普通用法**
|
||||
背景:
|
||||
- 公司机器A提供了DNS解析服务,UDP:53端口
|
||||
- 有VPS一个,公网IP:22.22.22.22
|
||||
#### **4.4.UDP common usage**
|
||||
Background:
|
||||
- The company computer A provides the DNS resolution, the UDP:53 port.
|
||||
- There is one VPS, which public IP is 22.22.22.22.
|
||||
|
||||
需求:
|
||||
在家里能够通过设置本地dns为22.22.22.22,使用公司机器A进行域名解析服务.
|
||||
Demand:
|
||||
You can use the company computer A for domain name resolution services by setting up local DNS as 22.22.22.22 at home.
|
||||
|
||||
步骤:
|
||||
1. 在vps上执行
|
||||
`./proxy tbridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy tserver --udp -r ":53@:53" -P "127.0.0.1:33080" -C proxy.crt -K proxy.key`
|
||||
Procedure:
|
||||
1. Execute on VPS
|
||||
`./proxy bridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy server --udp -r ":53@:53" -P "127.0.0.1:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 在公司机器A上面执行
|
||||
`./proxy tclient --udp -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
1. Execute on the company's computer A
|
||||
`./proxy client -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 完成
|
||||
1. complete
|
||||
|
||||
#### **4.5、高级用法一**
|
||||
背景:
|
||||
- 公司机器A提供了web服务80端口
|
||||
- 有VPS一个,公网IP:22.22.22.22
|
||||
#### **4.5.Advanced usage 1**
|
||||
Background:
|
||||
- The company computer A provides the 80 port of the web service
|
||||
- There is one VPS, which public IP is 22.22.22.22
|
||||
|
||||
需求:
|
||||
为了安全,不想在VPS上能够访问到公司机器A,在家里能够通过访问本机的28080端口,
|
||||
通过加密隧道访问到公司机器A的80端口.
|
||||
Demand:
|
||||
For security, the company's computer A should not be reachable from the VPS itself. At home, you can reach port 80 on the company's computer A through the encrypted tunnel by accessing port 28080 on your own computer.
|
||||
|
||||
步骤:
|
||||
1. 在vps上执行
|
||||
`./proxy tbridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
Procedure:
|
||||
1. Execute on VPS
|
||||
`./proxy bridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 在公司机器A上面执行
|
||||
`./proxy tclient -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
1. Execute on the company's computer A
|
||||
`./proxy client -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 在家里电脑上执行
|
||||
`./proxy tserver -r ":28080@:80" -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
1. Execute it on your own computer
|
||||
`./proxy server -r ":28080@:80" -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 完成
|
||||
1. complete
|
||||
|
||||
#### **4.6、高级用法二**
|
||||
提示:
|
||||
如果同时有多个client连接到同一个bridge,需要指定不同的key,可以通过--k参数设定,--k可以是任意唯一字符串,
|
||||
只要在同一个bridge上唯一即可.
|
||||
server连接到bridge的时候,如果同时有多个client连接到同一个bridge,需要使用--k参数选择client.
|
||||
暴露多个端口重复-r参数即可.-r格式是:"本地IP:本地端口@clientHOST:client端口".
|
||||
#### **4.6.Advanced usage 2**
|
||||
Tips:
|
||||
If several clients connect to the same bridge at the same time, each needs a different key, set with the --k parameter; --k can be any string, as long as it is unique on that bridge.

When the server connects to the bridge, if several clients are connected to the same bridge at the same time, the --k parameter is needed to select the client.

Repeat the -r parameter to expose multiple ports. The -r format is "local IP:local port@clientHOST:client port".
|
||||
|
||||
背景:
|
||||
- 公司机器A提供了web服务80端口,ftp服务21端口
|
||||
- 有VPS一个,公网IP:22.22.22.22
|
||||
Background:
|
||||
- The company computer A provides the web service 80 port and the FTP service 21 port
|
||||
- There is one VPS, which public IP is 22.22.22.22
|
||||
|
||||
需求:
|
||||
在家里能够通过访问VPS的28080端口访问到公司机器A的80端口
|
||||
在家里能够通过访问VPS的29090端口访问到公司机器A的21端口
|
||||
Demand:
|
||||
At home, you can reach port 80 on the company's computer A by accessing port 28080 on the VPS.

At home, you can reach port 21 on the company's computer A by accessing port 29090 on the VPS.
|
||||
|
||||
步骤:
|
||||
1. 在vps上执行
|
||||
`./proxy tbridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy tserver -r ":28080@:80" -r ":29090@:21" --k test -P "127.0.0.1:33080" -C proxy.crt -K proxy.key`
|
||||
Procedure:
|
||||
1. Execute on VPS
|
||||
`./proxy bridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy server -r ":28080@:80" -r ":29090@:21" --k test -P "127.0.0.1:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 在公司机器A上面执行
|
||||
`./proxy tclient --k test -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
1. Execute on the company's computer A
|
||||
`./proxy client --k test -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 完成
|
||||
1. complete
|
||||
|
||||
#### **4.7.tserver的-r参数**
|
||||
-r完整格式是:`PROTOCOL://LOCAL_IP:LOCAL_PORT@[CLIENT_KEY]CLIENT_LOCAL_HOST:CLIENT_LOCAL_PORT`
|
||||
#### **4.7.-r parameters of server**
|
||||
The full format of the -r is:`PROTOCOL://LOCAL_IP:LOCAL_PORT@[CLIENT_KEY]CLIENT_LOCAL_HOST:CLIENT_LOCAL_PORT`
|
||||
|
||||
4.7.1.协议PROTOCOL:tcp或者udp.
|
||||
比如: `-r "udp://:10053@:53" -r "tcp://:10800@:1080" -r ":8080@:80"`
|
||||
如果指定了--udp参数,PROTOCOL默认为udp,那么:`-r ":8080@:80"`默认为udp;
|
||||
如果没有指定--udp参数,PROTOCOL默认为tcp,那么:`-r ":8080@:80"`默认为tcp;
|
||||
4.7.1.PROTOCOL is tcp or udp.
|
||||
for example: `-r "udp://:10053@:53" -r "tcp://:10800@:1080" -r ":8080@:80"`
|
||||
If the --udp parameter is specified, PROTOCOL defaults to udp, so `-r ":8080@:80"` defaults to udp.

If the --udp parameter is not specified, PROTOCOL defaults to tcp, so `-r ":8080@:80"` defaults to tcp.
|
||||
|
||||
4.7.2.CLIENT_KEY:默认是default.
|
||||
比如: -r "udp://:10053@[test1]:53" -r "tcp://:10800@[test2]:1080" -r ":8080@:80"
|
||||
如果指定了--k参数,比如--k test,那么:`-r ":8080@:80"`CLIENT_KEY默认为test;
|
||||
如果没有指定--k参数,那么:`-r ":8080@:80"`CLIENT_KEY默认为default;
|
||||
4.7.2.CLIENT_KEY by default is 'default'.
|
||||
for example: -r "udp://:10053@[test1]:53" -r "tcp://:10800@[test2]:1080" -r ":8080@:80"
|
||||
If the --k parameter is specified, for example --k test, then the CLIENT_KEY of `-r ":8080@:80"` defaults to 'test'.

If the --k parameter is not specified, then the CLIENT_KEY of `-r ":8080@:80"` defaults to 'default'.
|
||||
|
||||
4.7.3.LOCAL_IP为空默认是:`0.0.0.0`,CLIENT_LOCAL_HOST为空默认是:`127.0.0.1`;
|
||||
4.7.3.If LOCAL_IP is empty it defaults to `0.0.0.0`; if CLIENT_LOCAL_HOST is empty it defaults to `127.0.0.1`.
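Putting 4.7.1-4.7.3 together, a sketch of a server command combining protocol, client key and addresses (it assumes a bridge on 127.0.0.1:33080 and a client that registered with --k test1):

```shell
# expose UDP 0.0.0.0:10053 on the server side and forward it to 127.0.0.1:53
# on the client whose key is test1
./proxy server -r "udp://:10053@[test1]:53" -P "127.0.0.1:33080" -C proxy.crt -K proxy.key
```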
|
||||
|
||||
#### **4.8.查看帮助**
|
||||
`./proxy help tbridge`
|
||||
`./proxy help tserver`
|
||||
`./proxy help tclient`
|
||||
#### **4.8.view help**
|
||||
`./proxy help bridge`
|
||||
`./proxy help server`
|
||||
`./proxy help client`
|
||||
|
||||
### 5.SOCKS5代理
|
||||
提示:SOCKS5代理,支持CONNECT,UDP协议,不支持BIND,支持用户名密码认证.
|
||||
#### **5.1.普通SOCKS5代理**
|
||||
### **5.SOCKS5 proxy**
|
||||
Tips: the SOCKS5 proxy supports the CONNECT command and the UDP protocol, does not support BIND, and supports username/password authentication.
|
||||
#### **5.1.Common SOCKS5 proxy**
|
||||
`./proxy socks -t tcp -p "0.0.0.0:38080"`
|
||||
|
||||
#### **5.2.普通二级SOCKS5代理**
|
||||
使用本地端口8090,假设上级SOCKS5代理是`22.22.22.22:8080`
|
||||
#### **5.2.Common SOCKS5 second level proxy**
|
||||
Using local port 8090, assume that the parent SOCKS5 proxy is `22.22.22.22:8080`
|
||||
`./proxy socks -t tcp -p "0.0.0.0:8090" -T tcp -P "22.22.22.22:8080" `
|
||||
我们还可以指定网站域名的黑白名单文件,一行一个域名,怕匹配规则是最右批评匹配,比如:baidu.com,匹配的是*.*.baidu.com,黑名单的域名域名直接走上级代理,白名单的域名不走上级代理.
|
||||
We can also specify black and white list files of domain names, one domain per line. Matching is right-most: for example, baidu.com matches *.*.baidu.com. Blacklisted domains always go through the parent proxy; whitelisted domains never go through the parent proxy.
|
||||
`./proxy socks -p "0.0.0.0:8090" -T tcp -P "22.22.22.22:8080" -b blocked.txt -d direct.txt`
|
||||
|
||||
#### **5.3.SOCKS二级代理(加密)**
|
||||
一级SOCKS代理(VPS,IP:22.22.22.22)
|
||||
#### **5.3.SOCKS second level encrypted proxy**
|
||||
SOCKS5 first level proxy(VPS,IP:22.22.22.22)
|
||||
`./proxy socks -t tls -p ":38080" -C proxy.crt -K proxy.key`
|
||||
|
||||
二级SOCKS代理(本地Linux)
|
||||
SOCKS5 second level proxy(local Linux)
|
||||
`./proxy socks -t tcp -p ":8080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地的8080端口就是访问VPS上面的代理端口38080.
|
||||
Accessing the local port 8080 is then equivalent to accessing proxy port 38080 on the VPS.
|
||||
|
||||
二级SOCKS代理(本地windows)
|
||||
SOCKS5 second level proxy(local windows)
|
||||
`./proxy.exe socks -t tcp -p ":8080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
然后设置你的windos系统中,需要通过代理上网的程序的代理为socks5模式,地址为:127.0.0.1,端口为:8080,程序即可通过加密通道通过vps上网。
|
||||
Then, in your Windows system, set the proxy of the programs that need to go online through the proxy to SOCKS5 mode, address 127.0.0.1, port 8080; those programs will then reach the Internet through the encrypted channel via the VPS.
|
||||
|
||||
#### **5.4.SOCKS三级代理(加密)**
|
||||
一级SOCKS代理VPS_01,IP:22.22.22.22
|
||||
#### **5.4.SOCKS third level encrypted proxy**
|
||||
SOCKS5 first level proxy VPS_01,IP:22.22.22.22
|
||||
`./proxy socks -t tls -p ":38080" -C proxy.crt -K proxy.key`
|
||||
二级SOCKS代理VPS_02,IP:33.33.33.33
|
||||
SOCKS5 second level proxy VPS_02,IP:33.33.33.33
|
||||
`./proxy socks -t tls -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
三级SOCKS代理(本地)
|
||||
SOCKS5 third level proxy(local)
|
||||
`./proxy socks -t tcp -p ":8080" -T tls -P "33.33.33.33:28080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地的8080端口就是访问一级SOCKS代理上面的代理端口38080.
|
||||
Accessing the local port 8080 is then equivalent to accessing proxy port 38080 on the first-level SOCKS5 proxy.
|
||||
|
||||
#### **5.5.SOCKS代理流量强制走上级SOCKS代理**
|
||||
默认情况下,proxy会智能判断一个网站域名是否无法访问,如果无法访问才走上级SOCKS代理.通过--always可以使全部SOCKS代理流量强制走上级SOCKS代理.
|
||||
#### **5.5.SOCKS proxy traffic force to go to parent socks proxy**
|
||||
By default, proxy intelligently judges whether a domain name is reachable; only if it is not does traffic go to the parent SOCKS proxy. With the --always parameter, all SOCKS proxy traffic can be forced through the parent SOCKS proxy.
|
||||
`./proxy socks --always -t tls -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
|
||||
#### **5.6.SOCKS通过SSH中转**
|
||||
说明:ssh中转的原理是利用了ssh的转发功能,就是你连接上ssh之后,可以通过ssh代理访问目标地址.
|
||||
假设有:vps
|
||||
- IP是2.2.2.2, ssh端口是22, ssh用户名是:user, ssh用户密码是:demo
|
||||
- 用户user的ssh私钥名称是user.key
|
||||
#### **5.6.Transfer through SSH**
|
||||
Explanation: the principle of SSH transfer is to take advantage of SSH's forwarding function, which is, after you connect to SSH, you can access the target address by the SSH.
|
||||
Suppose there is a vps
|
||||
- IP is 2.2.2.2, SSH port is 22, SSH username is user, SSH password is Demo
|
||||
- The SSH private key name of the user is user.key
|
||||
|
||||
##### ***5.6.1 ssh用户名和密码的方式***
|
||||
本地SOCKS5代理28080端口,执行:
|
||||
##### ***5.6.1.The way of username and password***
|
||||
Local SOCKS5 proxy 28080 port, execute:
|
||||
`./proxy socks -T ssh -P "2.2.2.2:22" -u user -A demo -t tcp -p ":28080"`
|
||||
##### ***5.6.2 ssh用户名和密钥的方式***
|
||||
本地SOCKS5代理28080端口,执行:
|
||||
##### ***5.6.2.The way of username and key***
|
||||
Local SOCKS5 proxy 28080 port, execute:
|
||||
`./proxy socks -T ssh -P "2.2.2.2:22" -u user -S user.key -t tcp -p ":28080"`
|
||||
|
||||
那么访问本地的28080端口就是通过VPS访问目标地址.
|
||||
Then access to the local 28080 port is to access the target address through VPS.
|
||||
|
||||
#### **5.7.认证**
|
||||
对于socks5代理协议我们可以进行用户名密码认证,认证的用户名和密码可以在命令行指定
|
||||
#### **5.7.Authentication**
|
||||
For the SOCKS5 proxy protocol we can use username/password authentication; the username and password can be specified on the command line.
|
||||
`./proxy socks -t tcp -p ":33080" -a "user1:pass1" -a "user2:pass2"`
|
||||
多个用户,重复-a参数即可.
|
||||
也可以放在文件中,格式是一行一个"用户名:密码",然后用-F指定.
|
||||
If you need multiple users, repeat the -a parameter.

Credentials can also be placed in a file, one "username:password" per line, and then specified with -F.
|
||||
`./proxy socks -t tcp -p ":33080" -F auth-file.txt`
|
||||
如果没有-a或-F参数,就是关闭认证.
|
||||
|
||||
#### **5.8.KCP协议传输**
|
||||
KCP协议需要-B参数设置一个密码用于加密解密数据
|
||||
In addition, the SOCKS5 proxy integrates external HTTP API authentication; we can specify an HTTP URL interface address with the --auth-url parameter.

When a user connects, the proxy issues a GET request to this URL with the parameters below; if HTTP status code 204 is returned, authentication succeeds.

In any other case, authentication fails.
|
||||
for example:
|
||||
`./proxy socks -t tcp -p ":33080" --auth-url "http://test.com/auth.php"`
|
||||
When a user connects, the proxy requests this URL ("http://test.com/auth.php") with a GET request,
carrying the three parameters user, pass and ip:
|
||||
http://test.com/auth.php?user={USER}&pass={PASS}&ip={IP}
|
||||
user:username
|
||||
pass:password
|
||||
ip: user's IP, for example: 192.168.1.200
|
||||
|
||||
一级HTTP代理(VPS,IP:22.22.22.22)
|
||||
If none of the -a, -F or --auth-url parameters is given, authentication is disabled.
|
||||
|
||||
#### **5.8.KCP protocol transmission**
|
||||
The KCP protocol requires a -B parameter to set a password to encrypt and decrypt data.
|
||||
|
||||
HTTP first level proxy(VPS,IP:22.22.22.22)
|
||||
`./proxy socks -t kcp -p ":38080" -B mypassword`
|
||||
|
||||
二级SOCKS代理(本地Linux)

SOCKS second level proxy (local Linux)
|
||||
`./proxy socks -t tcp -p ":8080" -T kcp -P "22.22.22.22:38080" -B mypassword`
|
||||
那么访问本地的8080端口就是访问VPS上面的代理端口38080,数据通过kcp协议传输.
|
||||
Then accessing local port 8080 goes to proxy port 38080 on the VPS, with the data transmitted over the KCP protocol.
|
||||
|
||||
#### **5.9.查看帮助**
|
||||
#### **5.9.View help**
|
||||
`./proxy help socks`
|
||||
|
||||
### TODO
|
||||
- http,socks代理多个上级负载均衡?

- Load balancing over multiple parent proxies for http and socks?

- 内网穿透server<->bridge心跳机制?

- Heartbeat mechanism between server and bridge for NAT traversal?

- 欢迎加群反馈...

- Welcome adding group feedback...
|
||||
|
||||
### 如何使用源码?
|
||||
cd进入你的go src目录,然后git clone https://github.com/snail007/goproxy.git ./proxy 即可.
|
||||
编译直接:go build
|
||||
运行: go run *.go
|
||||
utils是工具包,service是具体的每个服务类.
|
||||
### How to use the source code?
|
||||
Use cd to enter your go src directory, then run: git clone https://github.com/snail007/goproxy.git ./proxy

Compile: go build

Run: go run *.go

utils is the utility package; service contains each concrete service class.
|
||||
|
||||
### License
|
||||
Proxy is licensed under GPLv3 license.
|
||||
### Contact
|
||||
QQ交流群:189618940
|
||||
proxy QQ group:189618940
|
||||
|
||||
### Donation
|
||||
If proxy helps you a lot, you can support us by donating:
|
||||
### AliPay
|
||||
<img src="https://github.com/snail007/goproxy/blob/master/docs/images/alipay.jpg?raw=true" width="200"/>
|
||||
|
||||
### Wechat Pay
|
||||
<img src="https://github.com/snail007/goproxy/blob/master/docs/images/wxpay.jpg?raw=true" width="200"/>
|
||||
|
||||
|
||||
|
||||
|
||||
676
README_ZH.md
Normal file
@ -0,0 +1,676 @@
|
||||
<img src="https://github.com/snail007/goproxy/blob/master/docs/images/logo.jpg?raw=true" width="200"/>
|
||||
Proxy是golang实现的高性能http,https,websocket,tcp,udp,socks5代理服务器,支持正向代理、反向代理、透明代理、内网穿透、TCP/UDP端口映射、SSH中转,TLS加密传输。下载地址:https://github.com/snail007/goproxy/releases 官方QQ交流群:189618940
|
||||
|
||||
---
|
||||
|
||||
[](https://github.com/snail007/goproxy/) []() [](https://github.com/snail007/goproxy/releases) [](https://github.com/snail007/goproxy/releases)
|
||||
|
||||
[English Manual](/README.md)
|
||||
|
||||
### Features
|
||||
- 链式代理,程序本身可以作为一级代理,如果设置了上级代理那么可以作为二级代理,乃至N级代理.
|
||||
- 通讯加密,如果程序不是一级代理,而且上级代理也是本程序,那么可以加密和上级代理之间的通讯,采用底层tls高强度加密,安全无特征.
|
||||
- 智能HTTP,SOCKS5代理,会自动判断访问的网站是否屏蔽,如果被屏蔽那么就会使用上级代理(前提是配置了上级代理)访问网站;如果访问的网站没有被屏蔽,为了加速访问,代理会直接访问网站,不使用上级代理.
|
||||
- 域名黑白名单,更加自由的控制网站的访问方式。
|
||||
- 跨平台性,无论你是windows,linux,还是mac,甚至是树莓派,都可以很好的运行proxy.
|
||||
- 多协议支持,支持HTTP(S),TCP,UDP,Websocket,SOCKS5代理.
|
||||
- TCP/UDP端口转发.
|
||||
- 支持内网穿透,协议支持TCP和UDP.
|
||||
- SSH中转,HTTP(S),SOCKS5代理支持SSH中转,上级Linux服务器不需要任何服务端,本地一个proxy即可开心上网.
|
||||
- [KCP](https://github.com/xtaci/kcp-go)协议支持,HTTP(S),SOCKS5代理支持KCP协议传输数据,降低延迟,提升浏览体验.
|
||||
- 集成外部API,HTTP(S),SOCKS5代理认证功能可以与外部HTTP API集成,可以方便的通过外部系统控制代理用户.
|
||||
- 反向代理,支持直接把域名解析到proxy监听的ip,然后proxy就会帮你代理访问需要访问的HTTP(S)网站.
|
||||
- 透明HTTP(S)代理,配合iptables,在网关直接把出去的80,443方向的流量转发到proxy,就能实现无感知的智能路由器代理.
|
||||
|
||||
### Why need these?
|
||||
- 当由于某某原因,我们不能访问我们在其它地方的服务,我们可以通过多个相连的proxy节点建立起一个安全的隧道访问我们的服务.
|
||||
- 微信接口本地开发,方便调试.
|
||||
- 远程访问内网机器.
|
||||
- 和小伙伴一起玩局域网游戏.
|
||||
- 以前只能在局域网玩的,现在可以在任何地方玩.
|
||||
- 替代圣剑内网通,显IP内网通,花生壳之类的工具.
|
||||
- ...
|
||||
|
||||
|
||||
本页是v4.2手册,其他版本手册请点击下面链接查看.
|
||||
- [v4.0-v4.1手册](https://github.com/snail007/goproxy/tree/v4.1)
|
||||
- [v3.9手册](https://github.com/snail007/goproxy/tree/v3.9)
|
||||
- [v3.8手册](https://github.com/snail007/goproxy/tree/v3.8)
|
||||
- [v3.6-v3.7手册](https://github.com/snail007/goproxy/tree/v3.6)
|
||||
- [v3.5手册](https://github.com/snail007/goproxy/tree/v3.5)
|
||||
- [v3.4手册](https://github.com/snail007/goproxy/tree/v3.4)
|
||||
- [v3.3手册](https://github.com/snail007/goproxy/tree/v3.3)
|
||||
- [v3.2手册](https://github.com/snail007/goproxy/tree/v3.2)
|
||||
- [v3.1手册](https://github.com/snail007/goproxy/tree/v3.1)
|
||||
- [v3.0手册](https://github.com/snail007/goproxy/tree/v3.0)
|
||||
- [v2.x手册](https://github.com/snail007/goproxy/tree/v2.2)
|
||||
|
||||
### 怎么找到组织?
|
||||
[点击加入交流组织](https://gitter.im/go-proxy/Lobby?utm_source=share-link&utm_medium=link&utm_campaign=share-link)
|
||||
|
||||
### 安装
|
||||
1. [快速安装](#自动安装)
|
||||
1. [手动安装](#手动安装)
|
||||
|
||||
### 首次使用必看
|
||||
- [环境](#首次使用必看-1)
|
||||
- [使用配置文件](#使用配置文件)
|
||||
- [调试输出](#调试输出)
|
||||
- [使用日志文件](#使用日志文件)
|
||||
- [后台运行](#后台运行)
|
||||
- [守护运行](#守护运行)
|
||||
- [生成通讯证书文件](#生成加密通讯需要的证书文件)
|
||||
- [安全建议](#安全建议)
|
||||
|
||||
### 手册目录
|
||||
- [1. HTTP代理](#1http代理)
|
||||
- [1.1 普通HTTP代理](#11普通http代理)
|
||||
- [1.2 普通二级HTTP代理](#12普通二级http代理)
|
||||
- [1.3 HTTP二级代理(加密)](#13http二级代理加密)
|
||||
- [1.4 HTTP三级代理(加密)](#14http三级代理加密)
|
||||
- [1.5 Basic认证](#15basic认证)
|
||||
- [1.6 强制走上级HTTP代理](#16http代理流量强制走上级http代理)
|
||||
- [1.7 通过SSH中转](#17https通过ssh中转)
|
||||
- [1.7.1 用户名和密码的方式](#171-ssh用户名和密码的方式)
|
||||
- [1.7.2 用户名和密钥的方式](#172-ssh用户名和密钥的方式)
|
||||
- [1.8 KCP协议传输](#18kcp协议传输)
|
||||
- [1.9 HTTP(S)反向代理](#19-https反向代理)
|
||||
- [1.10 HTTP(S)透明代理](#110-https透明代理)
|
||||
- [1.11 查看帮助](#111查看帮助)
|
||||
- [2. TCP代理](#2tcp代理)
|
||||
- [2.1 普通一级TCP代理](#21普通一级tcp代理)
|
||||
- [2.2 普通二级TCP代理](#22普通二级tcp代理)
|
||||
- [2.3 普通三级TCP代理](#23普通三级tcp代理)
|
||||
- [2.4 加密二级TCP代理](#24加密二级tcp代理)
|
||||
- [2.5 加密三级TCP代理](#25加密三级tcp代理)
|
||||
- [2.6 查看帮助](#26查看帮助)
|
||||
- [3. UDP代理](#3udp代理)
|
||||
- [3.1 普通一级UDP代理](#31普通一级udp代理)
|
||||
- [3.2 普通二级UDP代理](#32普通二级udp代理)
|
||||
- [3.3 普通三级UDP代理](#33普通三级udp代理)
|
||||
- [3.4 加密二级UDP代理](#34加密二级udp代理)
|
||||
- [3.5 加密三级UDP代理](#35加密三级udp代理)
|
||||
- [3.6 查看帮助](#36查看帮助)
|
||||
- [4. 内网穿透](#4内网穿透)
|
||||
- [4.1 原理说明](#41原理说明)
|
||||
- [4.2 TCP普通用法](#42tcp普通用法)
|
||||
- [4.3 微信接口本地开发](#43微信接口本地开发)
|
||||
- [4.4 UDP普通用法](#44udp普通用法)
|
||||
- [4.5 高级用法一](#45高级用法一)
|
||||
- [4.6 高级用法二](#46高级用法二)
|
||||
- [4.7 server的-r参数](#47server的-r参数)
|
||||
- [4.8 查看帮助](#48查看帮助)
|
||||
- [5. SOCKS5代理](#5socks5代理)
|
||||
- [5.1 普通SOCKS5代理](#51普通socks5代理)
|
||||
- [5.2 普通二级SOCKS5代理](#52普通二级socks5代理)
|
||||
- [5.3 SOCKS二级代理(加密)](#53socks二级代理加密)
|
||||
- [5.4 SOCKS三级代理(加密)](#54socks三级代理加密)
|
||||
- [5.5 流量强制走上级SOCKS代理](#55socks代理流量强制走上级socks代理)
|
||||
- [5.6 通过SSH中转](#56socks通过ssh中转)
|
||||
- [5.6.1 用户名和密码的方式](#561-ssh用户名和密码的方式)
|
||||
- [5.6.2 用户名和密钥的方式](#562-ssh用户名和密钥的方式)
|
||||
- [5.7 认证](#57认证)
|
||||
- [5.8 KCP协议传输](#58kcp协议传输)
|
||||
- [5.9 查看帮助](#59查看帮助)
|
||||
|
||||
### Fast Start
|
||||
提示:所有操作需要root权限.
|
||||
#### 自动安装
|
||||
#### **0.如果你的VPS是linux64位的系统,那么只需要执行下面一句,就可以完成自动安装和配置.**
|
||||
```shell
|
||||
curl -L https://raw.githubusercontent.com/snail007/goproxy/master/install_auto.sh | bash
|
||||
```
|
||||
安装完成,配置目录是/etc/proxy,更详细的使用方法参考下面的进一步了解.
|
||||
如果安装失败或者你的vps不是linux64位系统,请按照下面的半自动步骤安装:
|
||||
|
||||
#### 手动安装
|
||||
|
||||
#### **1.下载proxy**
|
||||
下载地址:https://github.com/snail007/goproxy/releases
|
||||
```shell
|
||||
cd /root/proxy/
|
||||
wget https://github.com/snail007/goproxy/releases/download/v4.2/proxy-linux-amd64.tar.gz
|
||||
```
|
||||
#### **2.下载自动安装脚本**
|
||||
```shell
|
||||
cd /root/proxy/
|
||||
wget https://raw.githubusercontent.com/snail007/goproxy/master/install.sh
|
||||
chmod +x install.sh
|
||||
./install.sh
|
||||
```
|
||||
|
||||
## **首次使用必看**
|
||||
|
||||
### **环境**
|
||||
接下来的教程,默认系统是linux,程序是proxy;所有操作需要root权限;
|
||||
如果你的是windows,请使用windows版本的proxy.exe即可.
|
||||
|
||||
### **使用配置文件**
|
||||
接下来的教程都是通过命令行参数介绍使用方法,也可以通过读取配置文件获取参数.
|
||||
具体格式是通过@符号指定配置文件,例如:./proxy @configfile.txt
|
||||
configfile.txt里面的格式是,第一行是子命令名称,第二行开始一行一个:参数的长格式=参数值,前后不能有空格和双引号.
|
||||
参数的长格式都是--开头的,短格式参数都是-开头,如果你不知道某个短格式参数对应长格式参数,查看帮助命令即可.
|
||||
比如configfile.txt内容如下:
|
||||
```shell
|
||||
http
|
||||
--local-type=tcp
|
||||
--local=:33080
|
||||
```
|
||||
### **调试输出**
|
||||
默认情况下,日志输出的信息不包含文件行数,某些情况下为了排除程序问题,快速定位问题,
|
||||
可以使用--debug参数,输出代码行数和毫秒时间.
|
||||
|
||||
### **使用日志文件**
|
||||
默认情况下,日志是直接在控制台显示出来的,如果要保存到文件,可以使用--log参数,
|
||||
比如: --log proxy.log,日志就会输出到proxy.log方便排除问题.
|
||||
|
||||
|
||||
### **生成加密通讯需要的证书文件**
|
||||
http,tcp,udp代理过程会和上级通讯,为了安全我们采用加密通讯,当然可以选择不加密通信通讯,本教程所有和上级通讯都采用加密,需要证书文件.
|
||||
在linux上并安装了openssl命令,可以直接通过下面的命令生成证书和key文件.
|
||||
`./proxy keygen`
|
||||
默认会在当前程序目录下面生成证书文件proxy.crt和key文件proxy.key。
|
||||
|
||||
### **后台运行**
|
||||
默认执行proxy之后,如果要保持proxy运行,不能关闭命令行.
|
||||
如果想在后台运行proxy,命令行可以关闭,只需要在命令最后加上--daemon参数即可.
|
||||
比如:
|
||||
`./proxy http -t tcp -p "0.0.0.0:38080" --daemon`
|
||||
|
||||
### **守护运行**
|
||||
守护运行参数--forever,比如: `proxy http --forever` ,
|
||||
proxy会fork子进程,然后监控子进程,如果子进程异常退出,5秒后重启子进程.
|
||||
该参数配合后台运行参数--daemon和日志参数--log,可以保障proxy一直在后台执行不会因为意外退出,
|
||||
而且可以通过日志文件看到proxy的输出日志内容.
|
||||
比如: `proxy http -p ":9090" --forever --log proxy.log --daemon`
|
||||
|
||||
### **安全建议**
|
||||
当VPS在nat设备后面,vps上网卡IP都是内网IP,这个时候可以通过-g参数添加vps的外网ip防止死循环.
|
||||
假设你的vps外网ip是23.23.23.23,下面命令通过-g参数设置23.23.23.23
|
||||
`./proxy http -g "23.23.23.23"`
|
||||
|
||||
### **1.HTTP代理**
|
||||
#### **1.1.普通HTTP代理**
|
||||

|
||||
`./proxy http -t tcp -p "0.0.0.0:38080"`
|
||||
|
||||
#### **1.2.普通二级HTTP代理**
|
||||
使用本地端口8090,假设上级HTTP代理是`22.22.22.22:8080`
|
||||
`./proxy http -t tcp -p "0.0.0.0:8090" -T tcp -P "22.22.22.22:8080" `
|
||||
默认关闭了连接池,如果要加快访问速度,-L可以开启连接池,10就是连接池大小,0为关闭,
|
||||
开启连接池在网络不好的情况下,稳定不是很好.
|
||||
`./proxy http -t tcp -p "0.0.0.0:8090" -T tcp -P "22.22.22.22:8080" -L 10`
|
||||
我们还可以指定网站域名的黑白名单文件,一行一个域名,匹配规则是最右匹配,比如:baidu.com,匹配的是*.*.baidu.com,黑名单的域名直接走上级代理,白名单的域名不走上级代理.
|
||||
`./proxy http -p "0.0.0.0:8090" -T tcp -P "22.22.22.22:8080" -b blocked.txt -d direct.txt`
|
||||
|
||||
#### **1.3.HTTP二级代理(加密)**
|
||||
一级HTTP代理(VPS,IP:22.22.22.22)
|
||||
`./proxy http -t tls -p ":38080" -C proxy.crt -K proxy.key`
|
||||
|
||||
二级HTTP代理(本地Linux)
|
||||
`./proxy http -t tcp -p ":8080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地的8080端口就是访问VPS上面的代理端口38080.
|
||||
|
||||
二级HTTP代理(本地windows)
|
||||
`./proxy.exe http -t tcp -p ":8080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
然后设置你的windows系统中,需要通过代理上网的程序的代理为http模式,地址为:127.0.0.1,端口为:8080,程序即可通过加密通道通过vps上网。
|
||||
|
||||
#### **1.4.HTTP三级代理(加密)**
|
||||
一级HTTP代理VPS_01,IP:22.22.22.22
|
||||
`./proxy http -t tls -p ":38080" -C proxy.crt -K proxy.key`
|
||||
二级HTTP代理VPS_02,IP:33.33.33.33
|
||||
`./proxy http -t tls -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
三级HTTP代理(本地)
|
||||
`./proxy http -t tcp -p ":8080" -T tls -P "33.33.33.33:28080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地的8080端口就是访问一级HTTP代理上面的代理端口38080.
|
||||
|
||||
#### **1.5.Basic认证**
|
||||
对于HTTP代理协议我们可以进行Basic认证,认证的用户名和密码可以在命令行指定
|
||||
`./proxy http -t tcp -p ":33080" -a "user1:pass1" -a "user2:pass2"`
|
||||
多个用户,重复-a参数即可.
|
||||
也可以放在文件中,格式是一行一个"用户名:密码",然后用-F指定.
|
||||
`./proxy http -t tcp -p ":33080" -F auth-file.txt`
|
||||
|
||||
另外,http(s)代理还集成了外部HTTP API认证,我们可以通过--auth-url参数指定一个http url接口地址,
|
||||
然后有用户连接的时候,proxy会GET方式请求这url,带上下面四个参数,如果返回HTTP状态码204,代表认证成功
|
||||
其它情况认为认证失败.
|
||||
比如:
|
||||
`./proxy http -t tcp -p ":33080" --auth-url "http://test.com/auth.php"`
|
||||
用户连接的时候,proxy会GET方式请求这url("http://test.com/auth.php"),
|
||||
带上user,pass,ip,target四个参数:
|
||||
http://test.com/auth.php?user={USER}&pass={PASS}&ip={IP}&target={TARGET}
|
||||
user:用户名
|
||||
pass:密码
|
||||
ip:用户的IP,比如:192.168.1.200
|
||||
target:用户访问的URL,比如:http://demo.com:80/1.html或https://www.baidu.com:80
|
||||
|
||||
如果没有-a或-F或--auth-url参数,就是关闭Basic认证.
|
||||
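作为参考,下面是--auth-url接口的一个最小Go示意(监听端口、/auth.php路径、账号user1/pass1以及允许访问的demo.com均为示例假设,并非proxy自带),演示如何利用target参数按目标网站控制用户:

```go
// 一个假设的 --auth-url 接口:校验用户名密码,并根据 target 参数限制可访问的网站。
package main

import (
	"log"
	"net/http"
	"strings"
)

func main() {
	http.HandleFunc("/auth.php", func(w http.ResponseWriter, r *http.Request) {
		q := r.URL.Query()
		// proxy 会以 GET 方式带上 user,pass,ip,target 四个参数
		okUser := q.Get("user") == "user1" && q.Get("pass") == "pass1"
		okTarget := strings.Contains(q.Get("target"), "demo.com") // 示例:只允许访问 demo.com
		if okUser && okTarget {
			w.WriteHeader(http.StatusNoContent) // 返回 204 代表认证成功
			return
		}
		log.Printf("auth fail: user=%s ip=%s target=%s", q.Get("user"), q.Get("ip"), q.Get("target"))
		w.WriteHeader(http.StatusForbidden) // 其它状态码均视为认证失败
	})
	log.Fatal(http.ListenAndServe(":8000", nil))
}
```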
|
||||
#### **1.6.HTTP代理流量强制走上级HTTP代理**
|
||||
默认情况下,proxy会智能判断一个网站域名是否无法访问,如果无法访问才走上级HTTP代理.通过--always可以使全部HTTP代理流量强制走上级HTTP代理.
|
||||
`./proxy http --always -t tls -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
|
||||
#### **1.7.HTTP(S)通过SSH中转**
|
||||
说明:ssh中转的原理是利用了ssh的转发功能,就是你连接上ssh之后,可以通过ssh代理访问目标地址.
|
||||
假设有:vps
|
||||
- IP是2.2.2.2, ssh端口是22, ssh用户名是:user, ssh用户密码是:demo
|
||||
- 用户user的ssh私钥名称是user.key
|
||||
|
||||
##### ***1.7.1 ssh用户名和密码的方式***
|
||||
本地HTTP(S)代理28080端口,执行:
|
||||
`./proxy http -T ssh -P "2.2.2.2:22" -u user -A demo -t tcp -p ":28080"`
|
||||
##### ***1.7.2 ssh用户名和密钥的方式***
|
||||
本地HTTP(S)代理28080端口,执行:
|
||||
`./proxy http -T ssh -P "2.2.2.2:22" -u user -S user.key -t tcp -p ":28080"`
|
||||
|
||||
#### **1.8.KCP协议传输**
|
||||
KCP协议需要-B参数设置一个密码用于加密解密数据
|
||||
|
||||
一级HTTP代理(VPS,IP:22.22.22.22)
|
||||
`./proxy http -t kcp -p ":38080" -B mypassword`
|
||||
|
||||
二级HTTP代理(本地Linux)
|
||||
`./proxy http -t tcp -p ":8080" -T kcp -P "22.22.22.22:38080" -B mypassword`
|
||||
那么访问本地的8080端口就是访问VPS上面的代理端口38080,数据通过kcp协议传输.
|
||||
|
||||
#### **1.9 HTTP(S)反向代理**
|
||||
proxy不仅支持在其他软件里面通过设置代理的方式,为其他软件提供代理服务,而且支持直接把请求的网站域名解析到proxy监听的ip上,然后proxy监听80和443端口,那么proxy就会自动为你代理访问需要访问的HTTP(S)网站.
|
||||
|
||||
使用方式:
|
||||
在"最后一级proxy代理"的机器上,因为proxy要伪装成所有网站,网站默认的端口HTTP是80,HTTPS是443,让proxy监听80和443端口即可.参数-p多个地址用逗号分割.
|
||||
`./proxy http -t tcp -p :80,:443`
|
||||
|
||||
这个命令就在机器上启动了一个proxy代理,同时监听80和443端口,既可以当作普通的代理使用,也可以直接把需要代理的域名解析到这个机器的IP上.
|
||||
|
||||
如果有上级代理那么参照上面教程设置上级即可,使用方式完全一样.
|
||||
`./proxy http -t tcp -p :80,:443 -T tls -P "2.2.2.2:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
注意:
|
||||
proxy所在的服务器的DNS解析结果不能受到自定义的解析影响,不然就死循环了.
|
||||
|
||||
#### **1.10 HTTP(S)透明代理**
|
||||
该模式需要具有一定的网络基础,相关概念不懂的请自行搜索解决.
|
||||
假设proxy现在在路由器上运行,启动命令如下:
|
||||
`./proxy http -t tcp -p :33080 -T tls -P "2.2.2.2:33090" -C proxy.crt -K proxy.key`
|
||||
|
||||
然后添加iptables规则,下面是参考规则:
|
||||
```shell
|
||||
#上级proxy服务端服务器IP地址:
|
||||
proxy_server_ip=2.2.2.2
|
||||
|
||||
#路由器运行proxy监听的端口:
|
||||
proxy_local_port=33080
|
||||
|
||||
#下面的就不用修改了
|
||||
#create a new chain named PROXY
|
||||
iptables -t nat -N PROXY
|
||||
|
||||
# Ignore your PROXY server's addresses
|
||||
# It's very IMPORTANT, just be careful.
|
||||
|
||||
iptables -t nat -A PROXY -d $proxy_server_ip -j RETURN
|
||||
|
||||
# Ignore LANs IP address
|
||||
iptables -t nat -A PROXY -d 0.0.0.0/8 -j RETURN
|
||||
iptables -t nat -A PROXY -d 10.0.0.0/8 -j RETURN
|
||||
iptables -t nat -A PROXY -d 127.0.0.0/8 -j RETURN
|
||||
iptables -t nat -A PROXY -d 169.254.0.0/16 -j RETURN
|
||||
iptables -t nat -A PROXY -d 172.16.0.0/12 -j RETURN
|
||||
iptables -t nat -A PROXY -d 192.168.0.0/16 -j RETURN
|
||||
iptables -t nat -A PROXY -d 224.0.0.0/4 -j RETURN
|
||||
iptables -t nat -A PROXY -d 240.0.0.0/4 -j RETURN
|
||||
|
||||
# Anything to port 80 443 should be redirected to PROXY's local port
|
||||
iptables -t nat -A PROXY -p tcp --dport 80 -j REDIRECT --to-ports $proxy_local_port
|
||||
iptables -t nat -A PROXY -p tcp --dport 443 -j REDIRECT --to-ports $proxy_local_port
|
||||
|
||||
# Apply the rules to nat client
|
||||
iptables -t nat -A PREROUTING -p tcp -j PROXY
|
||||
# Apply the rules to localhost
|
||||
iptables -t nat -A OUTPUT -p tcp -j PROXY
|
||||
```
|
||||
- 清空整个链 iptables -F 链名比如iptables -t nat -F PROXY
|
||||
- 删除指定的用户自定义链 iptables -X 链名 比如 iptables -t nat -X PROXY
|
||||
- 从所选链中删除规则 iptables -D 链名 规则详情 比如 iptables -t nat -D PROXY -d 223.223.192.0/255.255.240.0 -j RETURN
|
||||
|
||||
#### **1.11.查看帮助**
|
||||
`./proxy help http`
|
||||
|
||||
### **2.TCP代理**
|
||||
|
||||
#### **2.1.普通一级TCP代理**
|
||||

|
||||
本地执行:
|
||||
`./proxy tcp -p ":33080" -T tcp -P "192.168.22.33:22"`
|
||||
那么访问本地33080端口就是访问192.168.22.33的22端口.
|
||||
|
||||
#### **2.2.普通二级TCP代理**
|
||||

|
||||
VPS(IP:22.22.22.33)执行:
|
||||
`./proxy tcp -p ":33080" -T tcp -P "127.0.0.1:8080"`
|
||||
本地执行:
|
||||
`./proxy tcp -p ":23080" -T tcp -P "22.22.22.33:33080"`
|
||||
那么访问本地23080端口就是访问22.22.22.33的8080端口.
|
||||
|
||||
#### **2.3.普通三级TCP代理**
|
||||
一级TCP代理VPS_01,IP:22.22.22.22
|
||||
`./proxy tcp -p ":38080" -T tcp -P "66.66.66.66:8080"`
|
||||
二级TCP代理VPS_02,IP:33.33.33.33
|
||||
`./proxy tcp -p ":28080" -T tcp -P "22.22.22.22:38080"`
|
||||
三级TCP代理(本地)
|
||||
`./proxy tcp -p ":8080" -T tcp -P "33.33.33.33:28080"`
|
||||
那么访问本地8080端口就是通过TCP隧道访问66.66.66.66的8080端口.
|
||||
|
||||
#### **2.4.加密二级TCP代理**
|
||||
VPS(IP:22.22.22.33)执行:
|
||||
`./proxy tcp -t tcp -p ":33080" -T tcp -P "127.0.0.1:8080" -C proxy.crt -K proxy.key`
|
||||
本地执行:
|
||||
`./proxy tcp -p ":23080" -T tls -P "22.22.22.33:33080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地23080端口就是通过加密TCP隧道访问22.22.22.33的8080端口.
|
||||
|
||||
#### **2.5.加密三级TCP代理**
|
||||
一级TCP代理VPS_01,IP:22.22.22.22
|
||||
`./proxy tcp -t tcp -p ":38080" -T tcp -P "66.66.66.66:8080" -C proxy.crt -K proxy.key`
|
||||
二级TCP代理VPS_02,IP:33.33.33.33
|
||||
`./proxy tcp -t tcp -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
三级TCP代理(本地)
|
||||
`./proxy tcp -p ":8080" -T tls -P "33.33.33.33:28080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地8080端口就是通过加密TCP隧道访问66.66.66.66的8080端口.
|
||||
|
||||
#### **2.6.查看帮助**
|
||||
`./proxy help tcp`
|
||||
|
||||
### **3.UDP代理**
|
||||
|
||||
#### **3.1.普通一级UDP代理**
|
||||
本地执行:
|
||||
`./proxy udp -p ":5353" -T udp -P "8.8.8.8:53"`
|
||||
那么访问本地UDP:5353端口就是访问8.8.8.8的UDP:53端口.
|
||||
|
||||
#### **3.2.普通二级UDP代理**
|
||||
VPS(IP:22.22.22.33)执行:
|
||||
`./proxy tcp -p ":33080" -T udp -P "8.8.8.8:53"`
|
||||
本地执行:
|
||||
`./proxy udp -p ":5353" -T tcp -P "22.22.22.33:33080"`
|
||||
那么访问本地UDP:5353端口就是通过TCP隧道,通过VPS访问8.8.8.8的UDP:53端口.
|
||||
|
||||
#### **3.3.普通三级UDP代理**
|
||||
一级TCP代理VPS_01,IP:22.22.22.22
|
||||
`./proxy tcp -p ":38080" -T udp -P "8.8.8.8:53"`
|
||||
二级TCP代理VPS_02,IP:33.33.33.33
|
||||
`./proxy tcp -p ":28080" -T tcp -P "22.22.22.22:38080"`
|
||||
三级TCP代理(本地)
|
||||
`./proxy udp -p ":5353" -T tcp -P "33.33.33.33:28080"`
|
||||
那么访问本地5353端口就是通过TCP隧道,通过VPS访问8.8.8.8的53端口.
|
||||
|
||||
#### **3.4.加密二级UDP代理**
|
||||
VPS(IP:22.22.22.33)执行:
|
||||
`./proxy tcp -t tcp -p ":33080" -T udp -P "8.8.8.8:53" -C proxy.crt -K proxy.key`
|
||||
本地执行:
|
||||
`./proxy udp -p ":5353" -T tls -P "22.22.22.33:33080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地UDP:5353端口就是通过加密TCP隧道,通过VPS访问8.8.8.8的UDP:53端口.
|
||||
|
||||
#### **3.5.加密三级UDP代理**
|
||||
一级TCP代理VPS_01,IP:22.22.22.22
|
||||
`./proxy tcp -t tcp -p ":38080" -T udp -P "8.8.8.8:53" -C proxy.crt -K proxy.key`
|
||||
二级TCP代理VPS_02,IP:33.33.33.33
|
||||
`./proxy tcp -t tcp -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
三级TCP代理(本地)
|
||||
`./proxy udp -p ":5353" -T tls -P "33.33.33.33:28080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地5353端口就是通过加密TCP隧道,通过VPS_01访问8.8.8.8的53端口.
|
||||
|
||||
#### **3.6.查看帮助**
|
||||
`./proxy help udp`
|
||||
|
||||
### **4.内网穿透**
|
||||
#### **4.1、原理说明**
|
||||
内网穿透,分为两个版本,“多链接版本”和“多路复用版本”,一般像web服务这种不是长时间连接的服务建议用“多链接版本”,如果是要保持长时间连接建议使用“多路复用版本”。
|
||||
1. 多链接版本,对应的子命令是tserver,tclient,tbridge。
|
||||
1. 多路复用版本,对应的子命令是server,client,bridge。
|
||||
1. 多链接版本和多路复用版本的参数和使用方式完全一样。
|
||||
1. **多路复用版本的server,client可以开启压缩传输,参数是--c。**
|
||||
1. **server,client要么都开启压缩,要么都不开启,不能只开一个。**
|
||||
|
||||
下面的教程以“多路复用版本”为例子,说明使用方法。
|
||||
内网穿透由三部分组成:client端,server端,bridge端;client和server主动连接bridge端进行桥接.
|
||||
当用户访问server端,流程是:
|
||||
1. 首先server端主动和bridge端建立连接;
|
||||
1. 然后bridge端通知client端连接bridge端和目标端口;
|
||||
1. 然后client端绑定“client端到bridge端”和“client端到目标端口”的连接;
|
||||
1. 然后bridge端把“client过来的连接”与“server端过来的连接”绑定;
|
||||
1. 整个通道建立完成;(多路复用的基本用法可参考下面的smux示意)
|
||||
|
||||
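作为补充,下面是一个与proxy本身无关的smux最小示意(监听地址127.0.0.1:9000为假设值),演示"在一条TCP连接上复用多个流"这一多路复用的基本思路;本次提交新增的services/mux_bridge.go引用的正是github.com/xtaci/smux:

```go
// smux 多路复用最小示意:左侧相当于接受端,把一条 TCP 连接包装成会话并回显每个流;
// 右侧相当于发起端,在同一条 TCP 连接上打开多个流。
package main

import (
	"fmt"
	"io"
	"net"

	"github.com/xtaci/smux"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:9000")
	if err != nil {
		panic(err)
	}
	go func() {
		conn, err := ln.Accept()
		if err != nil {
			return
		}
		session, _ := smux.Server(conn, nil) // 接受端会话
		for {
			stream, err := session.AcceptStream()
			if err != nil {
				return
			}
			go io.Copy(stream, stream) // 把收到的数据原样回显
		}
	}()

	conn, err := net.Dial("tcp", "127.0.0.1:9000")
	if err != nil {
		panic(err)
	}
	session, _ := smux.Client(conn, nil) // 发起端会话
	for i := 0; i < 3; i++ {
		stream, _ := session.OpenStream() // 复用同一条 TCP 连接打开新流
		fmt.Fprintf(stream, "hello %d\n", i)
		buf := make([]byte, 16)
		n, _ := stream.Read(buf)
		fmt.Printf("stream %d echo: %s", stream.ID(), buf[:n])
		stream.Close()
	}
}
```
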
#### **4.2、TCP普通用法**
|
||||
背景:
|
||||
- 公司机器A提供了web服务80端口
|
||||
- 有VPS一个,公网IP:22.22.22.22
|
||||
|
||||
需求:
|
||||
在家里能够通过访问VPS的28080端口访问到公司机器A的80端口
|
||||
|
||||
步骤:
|
||||
1. 在vps上执行
|
||||
`./proxy bridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy server -r ":28080@:80" -P "127.0.0.1:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 在公司机器A上面执行
|
||||
`./proxy client -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 完成
|
||||
|
||||
#### **4.3、微信接口本地开发**
|
||||
背景:
|
||||
- 自己的笔记本提供了nginx服务80端口
|
||||
- 有VPS一个,公网IP:22.22.22.22
|
||||
|
||||
需求:
|
||||
在微信的开发帐号的网页回调接口配置里面填写地址:http://22.22.22.22/calback.php
|
||||
然后就可以访问到笔记本的80端口下面的calback.php,如果需要绑定域名,可以用自己的域名
|
||||
比如:wx-dev.xxx.com解析到22.22.22.22,然后在自己笔记本的nginx里
|
||||
配置域名wx-dev.xxx.com到具体的目录即可.
|
||||
|
||||
|
||||
步骤:
|
||||
1. 在vps上执行,确保vps的80端口没被其它程序占用.
|
||||
`./proxy bridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy server -r ":80@:80" -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 在自己笔记本上面执行
|
||||
`./proxy client -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 完成
|
||||
|
||||
#### **4.4、UDP普通用法**
|
||||
背景:
|
||||
- 公司机器A提供了DNS解析服务,UDP:53端口
|
||||
- 有VPS一个,公网IP:22.22.22.22
|
||||
|
||||
需求:
|
||||
在家里能够通过设置本地dns为22.22.22.22,使用公司机器A进行域名解析服务.
|
||||
|
||||
步骤:
|
||||
1. 在vps上执行
|
||||
`./proxy bridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy server --udp -r ":53@:53" -P "127.0.0.1:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 在公司机器A上面执行
|
||||
`./proxy client -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 完成
|
||||
|
||||
#### **4.5、高级用法一**
|
||||
背景:
|
||||
- 公司机器A提供了web服务80端口
|
||||
- 有VPS一个,公网IP:22.22.22.22
|
||||
|
||||
需求:
|
||||
为了安全,不想在VPS上能够访问到公司机器A,在家里能够通过访问本机的28080端口,
|
||||
通过加密隧道访问到公司机器A的80端口.
|
||||
|
||||
步骤:
|
||||
1. 在vps上执行
|
||||
`./proxy bridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 在公司机器A上面执行
|
||||
`./proxy client -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 在家里电脑上执行
|
||||
`./proxy server -r ":28080@:80" -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 完成
|
||||
|
||||
#### **4.6、高级用法二**
|
||||
提示:
|
||||
如果同时有多个client连接到同一个bridge,需要指定不同的key,可以通过--k参数设定,--k可以是任意唯一字符串,
|
||||
只要在同一个bridge上唯一即可.
|
||||
server连接到bridge的时候,如果同时有多个client连接到同一个bridge,需要使用--k参数选择client.
|
||||
暴露多个端口重复-r参数即可.-r格式是:"本地IP:本地端口@clientHOST:client端口".
|
||||
|
||||
背景:
|
||||
- 公司机器A提供了web服务80端口,ftp服务21端口
|
||||
- 有VPS一个,公网IP:22.22.22.22
|
||||
|
||||
需求:
|
||||
在家里能够通过访问VPS的28080端口访问到公司机器A的80端口
|
||||
在家里能够通过访问VPS的29090端口访问到公司机器A的21端口
|
||||
|
||||
步骤:
|
||||
1. 在vps上执行
|
||||
`./proxy bridge -p ":33080" -C proxy.crt -K proxy.key`
|
||||
`./proxy server -r ":28080@:80" -r ":29090@:21" --k test -P "127.0.0.1:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 在公司机器A上面执行
|
||||
`./proxy client --k test -P "22.22.22.22:33080" -C proxy.crt -K proxy.key`
|
||||
|
||||
1. 完成
|
||||
|
||||
#### **4.7.server的-r参数**
|
||||
-r完整格式是:`PROTOCOL://LOCAL_IP:LOCAL_PORT@[CLIENT_KEY]CLIENT_LOCAL_HOST:CLIENT_LOCAL_PORT`
|
||||
|
||||
4.7.1.协议PROTOCOL:tcp或者udp.
|
||||
比如: `-r "udp://:10053@:53" -r "tcp://:10800@:1080" -r ":8080@:80"`
|
||||
如果指定了--udp参数,PROTOCOL默认为udp,那么:`-r ":8080@:80"`默认为udp;
|
||||
如果没有指定--udp参数,PROTOCOL默认为tcp,那么:`-r ":8080@:80"`默认为tcp;
|
||||
|
||||
4.7.2.CLIENT_KEY:默认是default.
|
||||
比如: -r "udp://:10053@[test1]:53" -r "tcp://:10800@[test2]:1080" -r ":8080@:80"
|
||||
如果指定了--k参数,比如--k test,那么:`-r ":8080@:80"`CLIENT_KEY默认为test;
|
||||
如果没有指定--k参数,那么:`-r ":8080@:80"`CLIENT_KEY默认为default;
|
||||
|
||||
4.7.3.LOCAL_IP为空默认是:`0.0.0.0`,CLIENT_LOCAL_HOST为空默认是:`127.0.0.1`;
|
||||
|
||||
#### **4.8.查看帮助**
|
||||
`./proxy help bridge`
|
||||
`./proxy help server`
|
||||
`./proxy help client`
|
||||
|
||||
### **5.SOCKS5代理**
|
||||
提示:SOCKS5代理,支持CONNECT,UDP协议,不支持BIND,支持用户名密码认证.
|
||||
#### **5.1.普通SOCKS5代理**
|
||||
`./proxy socks -t tcp -p "0.0.0.0:38080"`
|
||||
|
||||
#### **5.2.普通二级SOCKS5代理**
|
||||

|
||||
使用本地端口8090,假设上级SOCKS5代理是`22.22.22.22:8080`
|
||||
`./proxy socks -t tcp -p "0.0.0.0:8090" -T tcp -P "22.22.22.22:8080" `
|
||||
我们还可以指定网站域名的黑白名单文件,一行一个域名,匹配规则是最右匹配,比如:baidu.com,匹配的是*.*.baidu.com,黑名单的域名直接走上级代理,白名单的域名不走上级代理.
|
||||
`./proxy socks -p "0.0.0.0:8090" -T tcp -P "22.22.22.22:8080" -b blocked.txt -d direct.txt`
|
||||
|
||||
#### **5.3.SOCKS二级代理(加密)**
|
||||
一级SOCKS代理(VPS,IP:22.22.22.22)
|
||||
`./proxy socks -t tls -p ":38080" -C proxy.crt -K proxy.key`
|
||||
|
||||
二级SOCKS代理(本地Linux)
|
||||
`./proxy socks -t tcp -p ":8080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地的8080端口就是访问VPS上面的代理端口38080.
|
||||
|
||||
二级SOCKS代理(本地windows)
|
||||
`./proxy.exe socks -t tcp -p ":8080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
然后设置你的windows系统中,需要通过代理上网的程序的代理为socks5模式,地址为:127.0.0.1,端口为:8080,程序即可通过加密通道通过vps上网。
|
||||
|
||||
#### **5.4.SOCKS三级代理(加密)**
|
||||
一级SOCKS代理VPS_01,IP:22.22.22.22
|
||||
`./proxy socks -t tls -p ":38080" -C proxy.crt -K proxy.key`
|
||||
二级SOCKS代理VPS_02,IP:33.33.33.33
|
||||
`./proxy socks -t tls -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
三级SOCKS代理(本地)
|
||||
`./proxy socks -t tcp -p ":8080" -T tls -P "33.33.33.33:28080" -C proxy.crt -K proxy.key`
|
||||
那么访问本地的8080端口就是访问一级SOCKS代理上面的代理端口38080.
|
||||
|
||||
#### **5.5.SOCKS代理流量强制走上级SOCKS代理**
|
||||
默认情况下,proxy会智能判断一个网站域名是否无法访问,如果无法访问才走上级SOCKS代理.通过--always可以使全部SOCKS代理流量强制走上级SOCKS代理.
|
||||
`./proxy socks --always -t tls -p ":28080" -T tls -P "22.22.22.22:38080" -C proxy.crt -K proxy.key`
|
||||
|
||||
#### **5.6.SOCKS通过SSH中转**
|
||||
说明:ssh中转的原理是利用了ssh的转发功能,就是你连接上ssh之后,可以通过ssh代理访问目标地址.
|
||||
假设有:vps
|
||||
- IP是2.2.2.2, ssh端口是22, ssh用户名是:user, ssh用户密码是:demo
|
||||
- 用户user的ssh私钥名称是user.key
|
||||
|
||||
##### ***5.6.1 ssh用户名和密码的方式***
|
||||
本地SOCKS5代理28080端口,执行:
|
||||
`./proxy socks -T ssh -P "2.2.2.2:22" -u user -A demo -t tcp -p ":28080"`
|
||||
##### ***5.6.2 ssh用户名和密钥的方式***
|
||||
本地SOCKS5代理28080端口,执行:
|
||||
`./proxy socks -T ssh -P "2.2.2.2:22" -u user -S user.key -t tcp -p ":28080"`
|
||||
|
||||
那么访问本地的28080端口就是通过VPS访问目标地址.
|
||||
|
||||
#### **5.7.认证**
|
||||
对于socks5代理协议我们可以进行用户名密码认证,认证的用户名和密码可以在命令行指定
|
||||
`./proxy socks -t tcp -p ":33080" -a "user1:pass1" -a "user2:pass2"`
|
||||
多个用户,重复-a参数即可.
|
||||
也可以放在文件中,格式是一行一个"用户名:密码",然后用-F指定.
|
||||
`./proxy socks -t tcp -p ":33080" -F auth-file.txt`
|
||||
|
||||
另外,socks5代理还集成了外部HTTP API认证,我们可以通过--auth-url参数指定一个http url接口地址,
|
||||
然后有用户连接的时候,proxy会GET方式请求这url,带上下面三个参数,如果返回HTTP状态码204,代表认证成功
|
||||
其它情况认为认证失败.
|
||||
比如:
|
||||
`./proxy socks -t tcp -p ":33080" --auth-url "http://test.com/auth.php"`
|
||||
用户连接的时候,proxy会GET方式请求这url("http://test.com/auth.php"),
|
||||
带上user,pass,ip三个参数:
|
||||
http://test.com/auth.php?user={USER}&pass={PASS}&ip={IP}
|
||||
user:用户名
|
||||
pass:密码
|
||||
ip:用户的IP,比如:192.168.1.200
|
||||
|
||||
如果没有-a或-F或--auth-url参数,就是关闭认证.
|
||||
|
||||
#### **5.8.KCP协议传输**
|
||||
KCP协议需要-B参数设置一个密码用于加密解密数据
|
||||
|
||||
一级SOCKS代理(VPS,IP:22.22.22.22)
|
||||
`./proxy socks -t kcp -p ":38080" -B mypassword`
|
||||
|
||||
二级SOCKS代理(本地Linux)
|
||||
`./proxy socks -t tcp -p ":8080" -T kcp -P "22.22.22.22:38080" -B mypassword`
|
||||
那么访问本地的8080端口就是访问VPS上面的代理端口38080,数据通过kcp协议传输.
|
||||
|
||||
#### **5.9.查看帮助**
|
||||
`./proxy help socks`
|
||||
|
||||
### TODO
|
||||
- http,socks代理多个上级负载均衡?
|
||||
- http(s)代理增加pac支持?
|
||||
- 欢迎加群反馈...
|
||||
|
||||
### 如何使用源码?
|
||||
建议go1.8,不保证>=1.9能用.
|
||||
cd进入你的go src目录,然后git clone https://github.com/snail007/goproxy.git ./proxy 即可.
|
||||
编译直接:go build
|
||||
运行: go run *.go
|
||||
utils是工具包,service是具体的每个服务类.
|
||||
|
||||
### License
|
||||
Proxy is licensed under GPLv3 license.
|
||||
### Contact
|
||||
QQ交流群:189618940
|
||||
|
||||
|
||||
### Donation
|
||||
如果proxy帮助你解决了很多问题,你可以通过下面的捐赠更好的支持proxy.
|
||||
<img src="https://github.com/snail007/goproxy/blob/master/docs/images/alipay.jpg?raw=true" width="200"/>
|
||||
<img src="https://github.com/snail007/goproxy/blob/master/docs/images/wxpay.jpg?raw=true" width="200"/>
|
||||
|
||||
|
||||
132
config.go
@ -1,12 +1,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"proxy/services"
|
||||
"proxy/utils"
|
||||
"time"
|
||||
|
||||
kingpin "gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
@ -14,6 +16,7 @@ import (
|
||||
var (
|
||||
app *kingpin.Application
|
||||
service *services.ServiceItem
|
||||
cmd *exec.Cmd
|
||||
)
|
||||
|
||||
func initConfig() (err error) {
|
||||
@ -31,6 +34,9 @@ func initConfig() (err error) {
|
||||
tunnelServerArgs := services.TunnelServerArgs{}
|
||||
tunnelClientArgs := services.TunnelClientArgs{}
|
||||
tunnelBridgeArgs := services.TunnelBridgeArgs{}
|
||||
muxServerArgs := services.MuxServerArgs{}
|
||||
muxClientArgs := services.MuxClientArgs{}
|
||||
muxBridgeArgs := services.MuxBridgeArgs{}
|
||||
udpArgs := services.UDPArgs{}
|
||||
socksArgs := services.SocksArgs{}
|
||||
//build service args
|
||||
@ -38,6 +44,9 @@ func initConfig() (err error) {
|
||||
app.Author("snail").Version(APP_VERSION)
|
||||
debug := app.Flag("debug", "debug log output").Default("false").Bool()
|
||||
daemon := app.Flag("daemon", "run proxy in background").Default("false").Bool()
|
||||
forever := app.Flag("forever", "run proxy in forever,fail and retry").Default("false").Bool()
|
||||
logfile := app.Flag("log", "log file path").Default("").String()
|
||||
|
||||
//########http#########
|
||||
http := app.Command("http", "proxy on http mode")
|
||||
httpArgs.Parent = http.Flag("parent", "parent address, such as: \"23.32.32.19:28008\"").Default("").Short('P').String()
|
||||
@ -55,7 +64,7 @@ func initConfig() (err error) {
|
||||
httpArgs.Auth = http.Flag("auth", "http basic auth username and password, mutiple user repeat -a ,such as: -a user1:pass1 -a user2:pass2").Short('a').Strings()
|
||||
httpArgs.PoolSize = http.Flag("pool-size", "conn pool size , which connect to parent proxy, zero: means turn off pool").Short('L').Default("0").Int()
|
||||
httpArgs.CheckParentInterval = http.Flag("check-parent-interval", "check if proxy is okay every interval seconds,zero: means no check").Short('I').Default("3").Int()
|
||||
httpArgs.Local = http.Flag("local", "local ip:port to listen").Short('p').Default(":33080").String()
|
||||
httpArgs.Local = http.Flag("local", "local ip:port to listen,multiple address use comma split,such as: 0.0.0.0:80,0.0.0.0:443").Short('p').Default(":33080").String()
|
||||
httpArgs.SSHUser = http.Flag("ssh-user", "user for ssh").Short('u').Default("").String()
|
||||
httpArgs.SSHKeyFile = http.Flag("ssh-key", "private key file for ssh").Short('S').Default("").String()
|
||||
httpArgs.SSHKeyFileSalt = http.Flag("ssh-keysalt", "salt of ssh private key").Short('s').Default("").String()
|
||||
@ -63,6 +72,10 @@ func initConfig() (err error) {
|
||||
httpArgs.KCPKey = http.Flag("kcp-key", "key for kcp encrypt/decrypt data").Short('B').Default("encrypt").String()
|
||||
httpArgs.KCPMethod = http.Flag("kcp-method", "kcp encrypt/decrypt method").Short('M').Default("3des").String()
|
||||
httpArgs.LocalIPS = http.Flag("local bind ips", "if your host behind a nat,set your public ip here avoid dead loop").Short('g').Strings()
|
||||
httpArgs.AuthURL = http.Flag("auth-url", "http basic auth username and password will send to this url,response http code equal to 'auth-code' means ok,others means fail.").Default("").String()
|
||||
httpArgs.AuthURLTimeout = http.Flag("auth-timeout", "access 'auth-url' timeout milliseconds").Default("3000").Int()
|
||||
httpArgs.AuthURLOkCode = http.Flag("auth-code", "access 'auth-url' success http code").Default("204").Int()
|
||||
httpArgs.AuthURLRetry = http.Flag("auth-retry", "access 'auth-url' fail and retry count").Default("1").Int()
|
||||
|
||||
//########tcp#########
|
||||
tcp := app.Command("tcp", "proxy on tcp mode")
|
||||
@ -89,6 +102,33 @@ func initConfig() (err error) {
|
||||
udpArgs.CheckParentInterval = udp.Flag("check-parent-interval", "check if proxy is okay every interval seconds,zero: means no check").Short('I').Default("3").Int()
|
||||
udpArgs.Local = udp.Flag("local", "local ip:port to listen").Short('p').Default(":33080").String()
|
||||
|
||||
//########mux-server#########
|
||||
muxServer := app.Command("server", "proxy on mux server mode")
|
||||
muxServerArgs.Parent = muxServer.Flag("parent", "parent address, such as: \"23.32.32.19:28008\"").Default("").Short('P').String()
|
||||
muxServerArgs.CertFile = muxServer.Flag("cert", "cert file for tls").Short('C').Default("proxy.crt").String()
|
||||
muxServerArgs.KeyFile = muxServer.Flag("key", "key file for tls").Short('K').Default("proxy.key").String()
|
||||
muxServerArgs.Timeout = muxServer.Flag("timeout", "tcp timeout with milliseconds").Short('t').Default("2000").Int()
|
||||
muxServerArgs.IsUDP = muxServer.Flag("udp", "proxy on udp mux server mode").Default("false").Bool()
|
||||
muxServerArgs.Key = muxServer.Flag("k", "client key").Default("default").String()
|
||||
muxServerArgs.Route = muxServer.Flag("route", "local route to client's network, such as: PROTOCOL://LOCAL_IP:LOCAL_PORT@[CLIENT_KEY]CLIENT_LOCAL_HOST:CLIENT_LOCAL_PORT").Short('r').Default("").Strings()
|
||||
muxServerArgs.IsCompress = muxServer.Flag("c", "compress data when tcp mode").Default("false").Bool()
|
||||
|
||||
//########mux-client#########
|
||||
muxClient := app.Command("client", "proxy on mux client mode")
|
||||
muxClientArgs.Parent = muxClient.Flag("parent", "parent address, such as: \"23.32.32.19:28008\"").Default("").Short('P').String()
|
||||
muxClientArgs.CertFile = muxClient.Flag("cert", "cert file for tls").Short('C').Default("proxy.crt").String()
|
||||
muxClientArgs.KeyFile = muxClient.Flag("key", "key file for tls").Short('K').Default("proxy.key").String()
|
||||
muxClientArgs.Timeout = muxClient.Flag("timeout", "tcp timeout with milliseconds").Short('t').Default("2000").Int()
|
||||
muxClientArgs.Key = muxClient.Flag("k", "key same with server").Default("default").String()
|
||||
muxClientArgs.IsCompress = muxClient.Flag("c", "compress data when tcp mode").Default("false").Bool()
|
||||
|
||||
//########mux-bridge#########
|
||||
muxBridge := app.Command("bridge", "proxy on mux bridge mode")
|
||||
muxBridgeArgs.CertFile = muxBridge.Flag("cert", "cert file for tls").Short('C').Default("proxy.crt").String()
|
||||
muxBridgeArgs.KeyFile = muxBridge.Flag("key", "key file for tls").Short('K').Default("proxy.key").String()
|
||||
muxBridgeArgs.Timeout = muxBridge.Flag("timeout", "tcp timeout with milliseconds").Short('t').Default("2000").Int()
|
||||
muxBridgeArgs.Local = muxBridge.Flag("local", "local ip:port to listen").Short('p').Default(":33080").String()
|
||||
|
||||
//########tunnel-server#########
|
||||
tunnelServer := app.Command("tserver", "proxy on tunnel server mode")
|
||||
tunnelServerArgs.Parent = tunnelServer.Flag("parent", "parent address, such as: \"23.32.32.19:28008\"").Default("").Short('P').String()
|
||||
@ -97,7 +137,7 @@ func initConfig() (err error) {
|
||||
tunnelServerArgs.Timeout = tunnelServer.Flag("timeout", "tcp timeout with milliseconds").Short('t').Default("2000").Int()
|
||||
tunnelServerArgs.IsUDP = tunnelServer.Flag("udp", "proxy on udp tunnel server mode").Default("false").Bool()
|
||||
tunnelServerArgs.Key = tunnelServer.Flag("k", "client key").Default("default").String()
|
||||
tunnelServerArgs.Route = tunnelServer.Flag("route", "local route to client's network, such as :PROTOCOL://LOCAL_IP:LOCAL_PORT@[CLIENT_KEY]CLIENT_LOCAL_HOST:CLIENT_LOCAL_PORT").Short('r').Default("").Strings()
|
||||
tunnelServerArgs.Route = tunnelServer.Flag("route", "local route to client's network, such as: PROTOCOL://LOCAL_IP:LOCAL_PORT@[CLIENT_KEY]CLIENT_LOCAL_HOST:CLIENT_LOCAL_PORT").Short('r').Default("").Strings()
|
||||
|
||||
//########tunnel-client#########
|
||||
tunnelClient := app.Command("tclient", "proxy on tunnel client mode")
|
||||
@ -138,10 +178,28 @@ func initConfig() (err error) {
|
||||
socksArgs.KCPKey = socks.Flag("kcp-key", "key for kcp encrypt/decrypt data").Short('B').Default("encrypt").String()
|
||||
socksArgs.KCPMethod = socks.Flag("kcp-method", "kcp encrypt/decrypt method").Short('M').Default("3des").String()
|
||||
socksArgs.LocalIPS = socks.Flag("local bind ips", "if your host behind a nat,set your public ip here avoid dead loop").Short('g').Strings()
|
||||
socksArgs.AuthURL = socks.Flag("auth-url", "auth username and password will send to this url,response http code equal to 'auth-code' means ok,others means fail.").Default("").String()
|
||||
socksArgs.AuthURLTimeout = socks.Flag("auth-timeout", "access 'auth-url' timeout milliseconds").Default("3000").Int()
|
||||
socksArgs.AuthURLOkCode = socks.Flag("auth-code", "access 'auth-url' success http code").Default("204").Int()
|
||||
socksArgs.AuthURLRetry = socks.Flag("auth-retry", "access 'auth-url' fail and retry count").Default("0").Int()
|
||||
|
||||
//parse args
|
||||
serviceName := kingpin.MustParse(app.Parse(os.Args[1:]))
|
||||
flags := log.Ldate
|
||||
if *debug {
|
||||
flags |= log.Lshortfile | log.Lmicroseconds
|
||||
} else {
|
||||
flags |= log.Ltime
|
||||
}
|
||||
log.SetFlags(flags)
|
||||
|
||||
if *logfile != "" {
|
||||
f, e := os.OpenFile(*logfile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
|
||||
if e != nil {
|
||||
log.Fatal(e)
|
||||
}
|
||||
log.SetOutput(f)
|
||||
}
|
||||
if *daemon {
|
||||
args := []string{}
|
||||
for _, arg := range os.Args[1:] {
|
||||
@ -149,18 +207,69 @@ func initConfig() (err error) {
|
||||
args = append(args, arg)
|
||||
}
|
||||
}
|
||||
cmd := exec.Command(os.Args[0], args...)
|
||||
cmd = exec.Command(os.Args[0], args...)
|
||||
cmd.Start()
|
||||
fmt.Printf("%s [PID] %d running...\n", os.Args[0], cmd.Process.Pid)
|
||||
f := ""
|
||||
if *forever {
|
||||
f = "forever "
|
||||
}
|
||||
log.Printf("%s%s [PID] %d running...\n", f, os.Args[0], cmd.Process.Pid)
|
||||
os.Exit(0)
|
||||
}
|
||||
if *debug {
|
||||
flags |= log.Lshortfile | log.Lmicroseconds
|
||||
} else {
|
||||
flags |= log.Ltime
|
||||
if *forever {
|
||||
args := []string{}
|
||||
for _, arg := range os.Args[1:] {
|
||||
if arg != "--forever" {
|
||||
args = append(args, arg)
|
||||
}
|
||||
}
|
||||
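// supervisor loop: start the worker with the same args (minus --forever), relay its output, and restart it if it exits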
go func() {
|
||||
for {
|
||||
if cmd != nil {
|
||||
cmd.Process.Kill()
|
||||
}
|
||||
cmd = exec.Command(os.Args[0], args...)
|
||||
cmdReaderStderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
log.Printf("ERR:%s,restarting...\n", err)
|
||||
continue
|
||||
}
|
||||
cmdReader, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
log.Printf("ERR:%s,restarting...\n", err)
|
||||
continue
|
||||
}
|
||||
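// forward the worker's stdout and stderr to this process's stdout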
scanner := bufio.NewScanner(cmdReader)
|
||||
scannerStdErr := bufio.NewScanner(cmdReaderStderr)
|
||||
go func() {
|
||||
for scanner.Scan() {
|
||||
fmt.Println(scanner.Text())
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for scannerStdErr.Scan() {
|
||||
fmt.Println(scannerStdErr.Text())
|
||||
}
|
||||
}()
|
||||
if err := cmd.Start(); err != nil {
|
||||
log.Printf("ERR:%s,restarting...\n", err)
|
||||
continue
|
||||
}
|
||||
pid := cmd.Process.Pid
|
||||
log.Printf("worker %s [PID] %d running...\n", os.Args[0], pid)
|
||||
if err := cmd.Wait(); err != nil {
|
||||
log.Printf("ERR:%s,restarting...", err)
|
||||
continue
|
||||
}
|
||||
log.Printf("worker %s [PID] %d unexpected exited, restarting...\n", os.Args[0], pid)
|
||||
time.Sleep(time.Second * 5)
|
||||
}
|
||||
}()
|
||||
return
|
||||
}
|
||||
if *logfile == "" {
|
||||
poster()
|
||||
}
|
||||
log.SetFlags(flags)
|
||||
poster()
|
||||
//regist services and run service
|
||||
services.Regist("http", services.NewHTTP(), httpArgs)
|
||||
services.Regist("tcp", services.NewTCP(), tcpArgs)
|
||||
@ -168,6 +277,9 @@ func initConfig() (err error) {
|
||||
services.Regist("tserver", services.NewTunnelServerManager(), tunnelServerArgs)
|
||||
services.Regist("tclient", services.NewTunnelClient(), tunnelClientArgs)
|
||||
services.Regist("tbridge", services.NewTunnelBridge(), tunnelBridgeArgs)
|
||||
services.Regist("server", services.NewMuxServerManager(), muxServerArgs)
|
||||
services.Regist("client", services.NewMuxClient(), muxClientArgs)
|
||||
services.Regist("bridge", services.NewMuxBridge(), muxBridgeArgs)
|
||||
services.Regist("socks", services.NewSocks(), socksArgs)
|
||||
service, err = services.Run(serviceName)
|
||||
if err != nil {
|
||||
|
||||
BIN
docs/images/1.1.jpg
Normal file
|
After Width: | Height: | Size: 94 KiB |
BIN
docs/images/2.1.png
Normal file
|
After Width: | Height: | Size: 26 KiB |
BIN
docs/images/2.2.png
Normal file
|
After Width: | Height: | Size: 32 KiB |
BIN
docs/images/5.2.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
BIN
docs/images/alipay.jpg
Normal file
|
After Width: | Height: | Size: 39 KiB |
BIN
docs/images/wxpay.jpg
Normal file
|
After Width: | Height: | Size: 24 KiB |
@ -1,12 +1,6 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# install monexec
|
||||
tar zxvf monexec_0.1.1_linux_amd64.tar.gz
|
||||
cd monexec_0.1.1_linux_amd64
|
||||
cp monexec /usr/bin/
|
||||
chmod +x /usr/bin/monexec
|
||||
cd ..
|
||||
# #install proxy
|
||||
tar zxvf proxy-linux-amd64.tar.gz
|
||||
cp proxy /usr/bin/
|
||||
|
||||
@ -5,15 +5,8 @@ if [ -e /tmp/proxy ]; then
|
||||
fi
|
||||
mkdir /tmp/proxy
|
||||
cd /tmp/proxy
|
||||
wget https://github.com/reddec/monexec/releases/download/v0.1.1/monexec_0.1.1_linux_amd64.tar.gz
|
||||
wget https://github.com/snail007/goproxy/releases/download/v3.4/proxy-linux-amd64.tar.gz
|
||||
wget https://github.com/snail007/goproxy/releases/download/v4.2/proxy-linux-amd64.tar.gz
|
||||
|
||||
# install monexec
|
||||
tar zxvf monexec_0.1.1_linux_amd64.tar.gz
|
||||
cd monexec_0.1.1_linux_amd64
|
||||
cp monexec /usr/bin/
|
||||
chmod +x /usr/bin/monexec
|
||||
cd ..
|
||||
# #install proxy
|
||||
tar zxvf proxy-linux-amd64.tar.gz
|
||||
cp proxy /usr/bin/
|
||||
|
||||
19
main.go
@ -1,7 +1,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
@ -9,14 +8,18 @@ import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const APP_VERSION = "3.5"
|
||||
const APP_VERSION = "4.2"
|
||||
|
||||
func main() {
|
||||
err := initConfig()
|
||||
if err != nil {
|
||||
log.Fatalf("err : %s", err)
|
||||
}
|
||||
Clean(&service.S)
|
||||
if service != nil && service.S != nil {
|
||||
Clean(&service.S)
|
||||
} else {
|
||||
Clean(nil)
|
||||
}
|
||||
}
|
||||
func Clean(s *services.Service) {
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
@ -29,8 +32,14 @@ func Clean(s *services.Service) {
|
||||
syscall.SIGQUIT)
|
||||
go func() {
|
||||
for _ = range signalChan {
|
||||
fmt.Println("\nReceived an interrupt, stopping services...")
|
||||
(*s).Clean()
|
||||
log.Println("Received an interrupt, stopping services...")
|
||||
if s != nil && *s != nil {
|
||||
(*s).Clean()
|
||||
}
|
||||
if cmd != nil {
|
||||
log.Printf("clean process %d", cmd.Process.Pid)
|
||||
cmd.Process.Kill()
|
||||
}
|
||||
cleanupDone <- true
|
||||
}
|
||||
}()
|
||||
|
||||
83
release.sh
@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
VER="3.5"
|
||||
VER="4.2"
|
||||
RELEASE="release-${VER}"
|
||||
rm -rf .cert
|
||||
mkdir .cert
|
||||
@ -9,56 +9,59 @@ cd .cert
|
||||
cd ..
|
||||
rm -rf ${RELEASE}
|
||||
mkdir ${RELEASE}
|
||||
export CGO_ENABLED=0
|
||||
#linux
|
||||
GOOS=linux GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-linux-386.tar.gz" proxy direct blocked
|
||||
GOOS=linux GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-linux-amd64.tar.gz" proxy direct blocked
|
||||
GOOS=linux GOARCH=arm GOARM=7 go build && tar zcfv "${RELEASE}/proxy-linux-arm.tar.gz" proxy direct blocked
|
||||
GOOS=linux GOARCH=arm64 GOARM=7 go build && tar zcfv "${RELEASE}/proxy-linux-arm64.tar.gz" proxy direct blocked
|
||||
GOOS=linux GOARCH=mips go build && tar zcfv "${RELEASE}/proxy-linux-mips.tar.gz" proxy direct blocked
|
||||
GOOS=linux GOARCH=mips64 go build && tar zcfv "${RELEASE}/proxy-linux-mips64.tar.gz" proxy direct blocked
|
||||
GOOS=linux GOARCH=mips64le go build && tar zcfv "${RELEASE}/proxy-linux-mips64le.tar.gz" proxy direct blocked
|
||||
GOOS=linux GOARCH=mipsle go build && tar zcfv "${RELEASE}/proxy-linux-mipsle.tar.gz" proxy direct blocked
|
||||
GOOS=linux GOARCH=ppc64 go build && tar zcfv "${RELEASE}/proxy-linux-ppc64.tar.gz" proxy direct blocked
|
||||
GOOS=linux GOARCH=ppc64le go build && tar zcfv "${RELEASE}/proxy-linux-ppc64le.tar.gz" proxy direct blocked
|
||||
GOOS=linux GOARCH=s390x go build && tar zcfv "${RELEASE}/proxy-linux-s390x.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-linux-386.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-linux-amd64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=6 go build && tar zcfv "${RELEASE}/proxy-linux-arm-v6.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GOARM=6 go build && tar zcfv "${RELEASE}/proxy-linux-arm64-v6.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build && tar zcfv "${RELEASE}/proxy-linux-arm-v7.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GOARM=7 go build && tar zcfv "${RELEASE}/proxy-linux-arm64-v7.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=5 go build && tar zcfv "${RELEASE}/proxy-linux-arm-v5.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GOARM=5 go build && tar zcfv "${RELEASE}/proxy-linux-arm64-v5.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=mips go build && tar zcfv "${RELEASE}/proxy-linux-mips.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=mips64 go build && tar zcfv "${RELEASE}/proxy-linux-mips64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=mips64le go build && tar zcfv "${RELEASE}/proxy-linux-mips64le.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=mipsle go build && tar zcfv "${RELEASE}/proxy-linux-mipsle.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=ppc64 go build && tar zcfv "${RELEASE}/proxy-linux-ppc64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build && tar zcfv "${RELEASE}/proxy-linux-ppc64le.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build && tar zcfv "${RELEASE}/proxy-linux-s390x.tar.gz" proxy direct blocked
|
||||
#android
|
||||
GOOS=android GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-android-386.tar.gz" proxy direct blocked
|
||||
GOOS=android GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-android-amd64.tar.gz" proxy direct blocked
|
||||
GOOS=android GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-android-arm.tar.gz" proxy direct blocked
|
||||
GOOS=android GOARCH=arm64 go build && tar zcfv "${RELEASE}/proxy-android-arm64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=android GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-android-386.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=android GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-android-amd64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=android GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-android-arm.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=android GOARCH=arm64 go build && tar zcfv "${RELEASE}/proxy-android-arm64.tar.gz" proxy direct blocked
|
||||
#darwin
|
||||
GOOS=darwin GOARCH=386 go build go build && tar zcfv "${RELEASE}/proxy-darwin-386.tar.gz" proxy direct blocked
|
||||
GOOS=darwin GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-darwin-amd64.tar.gz" proxy direct blocked
|
||||
GOOS=darwin GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-darwin-arm.tar.gz" proxy direct blocked
|
||||
GOOS=darwin GOARCH=arm64 go build && tar zcfv "${RELEASE}/proxy-darwin-arm64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-darwin-386.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-darwin-amd64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-darwin-arm.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build && tar zcfv "${RELEASE}/proxy-darwin-arm64.tar.gz" proxy direct blocked
|
||||
#dragonfly
|
||||
GOOS=dragonfly GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-dragonfly-amd64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=dragonfly GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-dragonfly-amd64.tar.gz" proxy direct blocked
|
||||
#freebsd
|
||||
GOOS=freebsd GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-freebsd-386.tar.gz" proxy direct blocked
|
||||
GOOS=freebsd GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-freebsd-amd64.tar.gz" proxy direct blocked
|
||||
GOOS=freebsd GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-freebsd-arm.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=freebsd GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-freebsd-386.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-freebsd-amd64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=freebsd GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-freebsd-arm.tar.gz" proxy direct blocked
|
||||
#nacl
|
||||
GOOS=nacl GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-nacl-386.tar.gz" proxy direct blocked
|
||||
GOOS=nacl GOARCH=amd64p32 go build && tar zcfv "${RELEASE}/proxy-nacl-amd64p32.tar.gz" proxy direct blocked
|
||||
GOOS=nacl GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-nacl-arm.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=nacl GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-nacl-386.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=nacl GOARCH=amd64p32 go build && tar zcfv "${RELEASE}/proxy-nacl-amd64p32.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=nacl GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-nacl-arm.tar.gz" proxy direct blocked
|
||||
#netbsd
|
||||
GOOS=netbsd GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-netbsd-386.tar.gz" proxy direct blocked
|
||||
GOOS=netbsd GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-netbsd-amd64.tar.gz" proxy direct blocked
|
||||
GOOS=netbsd GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-netbsd-arm.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=netbsd GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-netbsd-386.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=netbsd GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-netbsd-amd64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=netbsd GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-netbsd-arm.tar.gz" proxy direct blocked
|
||||
#openbsd
|
||||
GOOS=openbsd GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-openbsd-386.tar.gz" proxy direct blocked
|
||||
GOOS=openbsd GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-openbsd-amd64.tar.gz" proxy direct blocked
|
||||
GOOS=openbsd GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-openbsd-arm.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=openbsd GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-openbsd-386.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-openbsd-amd64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=openbsd GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-openbsd-arm.tar.gz" proxy direct blocked
|
||||
#plan9
|
||||
GOOS=plan9 GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-plan9-386.tar.gz" proxy direct blocked
|
||||
GOOS=plan9 GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-plan9-amd64.tar.gz" proxy direct blocked
|
||||
GOOS=plan9 GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-plan9-arm.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=plan9 GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-plan9-386.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=plan9 GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-plan9-amd64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=plan9 GOARCH=arm go build && tar zcfv "${RELEASE}/proxy-plan9-arm.tar.gz" proxy direct blocked
|
||||
#solaris
|
||||
GOOS=solaris GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-solaris-amd64.tar.gz" proxy direct blocked
|
||||
CGO_ENABLED=0 GOOS=solaris GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-solaris-amd64.tar.gz" proxy direct blocked
|
||||
#windows
|
||||
GOOS=windows GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-windows-386.tar.gz" proxy.exe direct blocked .cert/proxy.crt .cert/proxy.key
|
||||
GOOS=windows GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-windows-amd64.tar.gz" proxy.exe direct blocked .cert/proxy.crt .cert/proxy.key
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=386 go build && tar zcfv "${RELEASE}/proxy-windows-386.tar.gz" proxy.exe direct blocked .cert/proxy.crt .cert/proxy.key
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build && tar zcfv "${RELEASE}/proxy-windows-amd64.tar.gz" proxy.exe direct blocked .cert/proxy.crt .cert/proxy.key
|
||||
|
||||
rm -rf proxy proxy.exe .cert
|
||||
|
||||
|
||||
@ -16,8 +16,45 @@ const (
|
||||
CONN_SERVER_HEARBEAT = uint8(3)
|
||||
CONN_SERVER = uint8(4)
|
||||
CONN_CLIENT = uint8(5)
|
||||
CONN_SERVER_MUX = uint8(6)
|
||||
CONN_CLIENT_MUX = uint8(7)
|
||||
)
|
||||
|
||||
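// MuxServerArgs holds the parsed flags of the multiplexing "server" subcommand; MuxClientArgs and MuxBridgeArgs below do the same for "client" and "bridge".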
type MuxServerArgs struct {
|
||||
Parent *string
|
||||
CertFile *string
|
||||
KeyFile *string
|
||||
CertBytes []byte
|
||||
KeyBytes []byte
|
||||
Local *string
|
||||
IsUDP *bool
|
||||
Key *string
|
||||
Remote *string
|
||||
Timeout *int
|
||||
Route *[]string
|
||||
Mgr *MuxServerManager
|
||||
IsCompress *bool
|
||||
}
|
||||
type MuxClientArgs struct {
|
||||
Parent *string
|
||||
CertFile *string
|
||||
KeyFile *string
|
||||
CertBytes []byte
|
||||
KeyBytes []byte
|
||||
Key *string
|
||||
Timeout *int
|
||||
IsCompress *bool
|
||||
}
|
||||
type MuxBridgeArgs struct {
|
||||
Parent *string
|
||||
CertFile *string
|
||||
KeyFile *string
|
||||
CertBytes []byte
|
||||
KeyBytes []byte
|
||||
Local *string
|
||||
Timeout *int
|
||||
IsCompress *bool
|
||||
}
|
||||
type TunnelServerArgs struct {
|
||||
Parent *string
|
||||
CertFile *string
|
||||
@ -31,6 +68,7 @@ type TunnelServerArgs struct {
|
||||
Timeout *int
|
||||
Route *[]string
|
||||
Mgr *TunnelServerManager
|
||||
Mux *bool
|
||||
}
|
||||
type TunnelClientArgs struct {
|
||||
Parent *string
|
||||
@ -40,6 +78,7 @@ type TunnelClientArgs struct {
|
||||
KeyBytes []byte
|
||||
Key *string
|
||||
Timeout *int
|
||||
Mux *bool
|
||||
}
|
||||
type TunnelBridgeArgs struct {
|
||||
Parent *string
|
||||
@ -49,6 +88,7 @@ type TunnelBridgeArgs struct {
|
||||
KeyBytes []byte
|
||||
Local *string
|
||||
Timeout *int
|
||||
Mux *bool
|
||||
}
|
||||
type TCPArgs struct {
|
||||
Parent *string
|
||||
@ -80,6 +120,10 @@ type HTTPArgs struct {
|
||||
Direct *string
|
||||
AuthFile *string
|
||||
Auth *[]string
|
||||
AuthURL *string
|
||||
AuthURLOkCode *int
|
||||
AuthURLTimeout *int
|
||||
AuthURLRetry *int
|
||||
ParentType *string
|
||||
LocalType *string
|
||||
Timeout *int
|
||||
@ -129,6 +173,10 @@ type SocksArgs struct {
|
||||
Direct *string
|
||||
AuthFile *string
|
||||
Auth *[]string
|
||||
AuthURL *string
|
||||
AuthURLOkCode *int
|
||||
AuthURLTimeout *int
|
||||
AuthURLRetry *int
|
||||
KCPMethod *string
|
||||
KCPKey *string
|
||||
UDPParent *string
|
||||
|
||||
@ -9,6 +9,7 @@ import (
|
||||
"proxy/utils"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ssh"
|
||||
@ -115,20 +116,24 @@ func (s *HTTP) Start(args interface{}) (err error) {
|
||||
s.InitOutConnPool()
|
||||
}
|
||||
s.InitService()
|
||||
host, port, _ := net.SplitHostPort(*s.cfg.Local)
|
||||
p, _ := strconv.Atoi(port)
|
||||
sc := utils.NewServerChannel(host, p)
|
||||
if *s.cfg.LocalType == TYPE_TCP {
|
||||
err = sc.ListenTCP(s.callback)
|
||||
} else if *s.cfg.LocalType == TYPE_TLS {
|
||||
err = sc.ListenTls(s.cfg.CertBytes, s.cfg.KeyBytes, s.callback)
|
||||
} else if *s.cfg.LocalType == TYPE_KCP {
|
||||
err = sc.ListenKCP(*s.cfg.KCPMethod, *s.cfg.KCPKey, s.callback)
|
||||
for _, addr := range strings.Split(*s.cfg.Local, ",") {
|
||||
if addr != "" {
|
||||
host, port, _ := net.SplitHostPort(addr)
|
||||
p, _ := strconv.Atoi(port)
|
||||
sc := utils.NewServerChannel(host, p)
|
||||
if *s.cfg.LocalType == TYPE_TCP {
|
||||
err = sc.ListenTCP(s.callback)
|
||||
} else if *s.cfg.LocalType == TYPE_TLS {
|
||||
err = sc.ListenTls(s.cfg.CertBytes, s.cfg.KeyBytes, s.callback)
|
||||
} else if *s.cfg.LocalType == TYPE_KCP {
|
||||
err = sc.ListenKCP(*s.cfg.KCPMethod, *s.cfg.KCPKey, s.callback)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
log.Printf("%s http(s) proxy on %s", *s.cfg.LocalType, (*sc.Listener).Addr())
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
log.Printf("%s http(s) proxy on %s", *s.cfg.LocalType, (*sc.Listener).Addr())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -146,28 +151,28 @@ func (s *HTTP) callback(inConn net.Conn) {
    req, err = utils.NewHTTPRequest(&inConn, 4096, s.IsBasicAuth(), &s.basicAuth)
    if err != nil {
        if err != io.EOF {
            log.Printf("decoder error , form %s, ERR:%s", err, inConn.RemoteAddr())
            log.Printf("decoder error , from %s, ERR:%s", inConn.RemoteAddr(), err)
        }
        utils.CloseConn(&inConn)
        return
    }
    address := req.Host

    useProxy := true
    if *s.cfg.Parent == "" {
        useProxy = false
    } else if *s.cfg.Always {
        host, _, _ := net.SplitHostPort(address)
        useProxy := false
        if !utils.IsIternalIP(host) {
            useProxy = true
        } else {
        if req.IsHTTPS() {
            s.checker.Add(address, true, req.Method, "", nil)
        if *s.cfg.Parent == "" {
            useProxy = false
        } else if *s.cfg.Always {
            useProxy = true
        } else {
            s.checker.Add(address, false, req.Method, req.URL, req.HeadBuf)
            s.checker.Add(address)
            //var n, m uint
            useProxy, _, _ = s.checker.IsBlocked(req.Host)
            //log.Printf("blocked ? : %v, %s , fail:%d ,success:%d", useProxy, address, n, m)
        }
        //var n, m uint
        useProxy, _, _ = s.checker.IsBlocked(req.Host)
        //log.Printf("blocked ? : %v, %s , fail:%d ,success:%d", useProxy, address, n, m)
    }

    log.Printf("use proxy : %v, %s", useProxy, address)

    err = s.OutToTCP(useProxy, address, &inConn, &req)
@@ -322,6 +327,10 @@ func (s *HTTP) InitOutConnPool() {
    }
}
func (s *HTTP) InitBasicAuth() (err error) {
    s.basicAuth = utils.NewBasicAuth()
    if *s.cfg.AuthURL != "" {
        s.basicAuth.SetAuthURL(*s.cfg.AuthURL, *s.cfg.AuthURLOkCode, *s.cfg.AuthURLTimeout, *s.cfg.AuthURLRetry)
        log.Printf("auth from %s", *s.cfg.AuthURL)
    }
    if *s.cfg.AuthFile != "" {
        var n = 0
        n, err = s.basicAuth.AddFromFile(*s.cfg.AuthFile)
@@ -338,7 +347,7 @@ func (s *HTTP) InitBasicAuth() (err error) {
    return
}
func (s *HTTP) IsBasicAuth() bool {
    return *s.cfg.AuthFile != "" || len(*s.cfg.Auth) > 0
    return *s.cfg.AuthFile != "" || len(*s.cfg.Auth) > 0 || *s.cfg.AuthURL != ""
}
func (s *HTTP) IsDeadLoop(inLocalAddr string, host string) bool {
    inIP, inPort, err := net.SplitHostPort(inLocalAddr)

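The callback hunk above replaces the old "always ask the checker" logic with a three-way decision: no parent means direct, Always forces the parent, and otherwise intranet targets go direct while everything else is decided by the blocked-checker. A condensed sketch of that decision tree, with the checker and the internal-IP test replaced by stubs (both stubs are assumptions for illustration, not the project's utils API):

package main

import (
    "fmt"
    "net"
)

// Stand-ins for utils.IsIternalIP and the reachability checker; only the
// shape of the decision matters here.
func isInternal(host string) bool { ip := net.ParseIP(host); return ip != nil && ip.IsLoopback() }
func isBlocked(addr string) bool  { return false }

// decideUseProxy condenses the rewritten callback logic: no parent -> direct,
// Always -> parent, intranet target -> direct, otherwise ask the checker.
func decideUseProxy(parent string, always bool, address string) bool {
    if parent == "" {
        return false
    }
    if always {
        return true
    }
    host, _, _ := net.SplitHostPort(address)
    if isInternal(host) {
        return false
    }
    return isBlocked(address)
}

func main() {
    fmt.Println(decideUseProxy("1.2.3.4:33080", false, "127.0.0.1:80"))   // false: LAN target
    fmt.Println(decideUseProxy("", false, "example.com:443"))             // false: no parent
    fmt.Println(decideUseProxy("1.2.3.4:33080", true, "example.com:443")) // true: forced
}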
services/mux_bridge.go (new file, 155 lines)
@@ -0,0 +1,155 @@
package services

import (
    "bufio"
    "io"
    "log"
    "net"
    "proxy/utils"
    "strconv"
    "time"

    "github.com/xtaci/smux"
)

type MuxBridge struct {
    cfg MuxBridgeArgs
    clientControlConns utils.ConcurrentMap
}

func NewMuxBridge() Service {
    return &MuxBridge{
        cfg: MuxBridgeArgs{},
        clientControlConns: utils.NewConcurrentMap(),
    }
}

func (s *MuxBridge) InitService() {

}
func (s *MuxBridge) CheckArgs() {
    if *s.cfg.CertFile == "" || *s.cfg.KeyFile == "" {
        log.Fatalf("cert and key file required")
    }
    s.cfg.CertBytes, s.cfg.KeyBytes = utils.TlsBytes(*s.cfg.CertFile, *s.cfg.KeyFile)
}
func (s *MuxBridge) StopService() {

}
func (s *MuxBridge) Start(args interface{}) (err error) {
    s.cfg = args.(MuxBridgeArgs)
    s.CheckArgs()
    s.InitService()
    host, port, _ := net.SplitHostPort(*s.cfg.Local)
    p, _ := strconv.Atoi(port)
    sc := utils.NewServerChannel(host, p)

    err = sc.ListenTls(s.cfg.CertBytes, s.cfg.KeyBytes, func(inConn net.Conn) {
        reader := bufio.NewReader(inConn)

        var err error
        var connType uint8
        var key string
        err = utils.ReadPacket(reader, &connType, &key)
        if err != nil {
            log.Printf("read error,ERR:%s", err)
            return
        }
        switch connType {
        case CONN_SERVER:
            var serverID string
            err = utils.ReadPacketData(reader, &serverID)
            if err != nil {
                log.Printf("read error,ERR:%s", err)
                return
            }
            log.Printf("server connection %s %s connected", serverID, key)
            session, err := smux.Server(inConn, nil)
            if err != nil {
                utils.CloseConn(&inConn)
                log.Printf("server session error,ERR:%s", err)
                return
            }
            for {
                stream, err := session.AcceptStream()
                if err != nil {
                    session.Close()
                    utils.CloseConn(&inConn)
                    return
                }
                go s.callback(stream, serverID, key)
            }
        case CONN_CLIENT:

            log.Printf("client connection %s connected", key)
            session, err := smux.Client(inConn, nil)
            if err != nil {
                utils.CloseConn(&inConn)
                log.Printf("client session error,ERR:%s", err)
                return
            }
            s.clientControlConns.Set(key, session)
            go func() {
                for {
                    if session.IsClosed() {
                        s.clientControlConns.Remove(key)
                        break
                    }
                    time.Sleep(time.Second * 5)
                }
            }()
            //log.Printf("set client session,key: %s", key)
        }

    })
    if err != nil {
        return
    }
    log.Printf("proxy on mux bridge mode %s", (*sc.Listener).Addr())
    return
}
func (s *MuxBridge) Clean() {
    s.StopService()
}
func (s *MuxBridge) callback(inConn net.Conn, serverID, key string) {
    try := 20
    for {
        try--
        if try == 0 {
            break
        }
        session, ok := s.clientControlConns.Get(key)
        if !ok {
            log.Printf("client %s session not exists for server stream %s", key, serverID)
            time.Sleep(time.Second * 3)
            continue
        }
        stream, err := session.(*smux.Session).OpenStream()
        if err != nil {
            log.Printf("%s client session open stream %s fail, err: %s, retrying...", key, serverID, err)
            time.Sleep(time.Second * 3)
            continue
        } else {
            log.Printf("%s server %s stream created", key, serverID)
            die1 := make(chan bool, 1)
            die2 := make(chan bool, 1)
            go func() {
                io.Copy(stream, inConn)
                die1 <- true
            }()
            go func() {
                io.Copy(inConn, stream)
                die2 <- true
            }()
            select {
            case <-die1:
            case <-die2:
            }
            stream.Close()
            inConn.Close()
            log.Printf("%s server %s stream released", key, serverID)
            break
        }
    }

}
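mux_bridge.go hands every accepted TLS connection to smux, so one physical connection from a server or client carries many logical streams. The sketch below shows just that smux pairing (Server/AcceptStream on the accepting side, Client/OpenStream on the dialing side) over a plain local TCP connection; the port and echo payload are examples, not part of the project.

package main

import (
    "io"
    "log"
    "net"

    "github.com/xtaci/smux"
)

// One TCP connection, many logical streams: the multiplexing pattern the
// bridge relies on, reduced to a local echo.
func main() {
    ln, err := net.Listen("tcp", "127.0.0.1:9099")
    if err != nil {
        log.Fatal(err)
    }
    go func() {
        c, _ := ln.Accept()
        sess, err := smux.Server(c, nil)
        if err != nil {
            return
        }
        for {
            stream, err := sess.AcceptStream()
            if err != nil {
                return
            }
            go io.Copy(stream, stream) // echo each stream back
        }
    }()

    c, err := net.Dial("tcp", "127.0.0.1:9099")
    if err != nil {
        log.Fatal(err)
    }
    sess, err := smux.Client(c, nil)
    if err != nil {
        log.Fatal(err)
    }
    st, _ := sess.OpenStream()
    st.Write([]byte("ping"))
    buf := make([]byte, 4)
    io.ReadFull(st, buf)
    log.Printf("echoed: %s", buf)
}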
services/mux_client.go (new file, 206 lines)
@@ -0,0 +1,206 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"proxy/utils"
|
||||
"time"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
"github.com/xtaci/smux"
|
||||
)
|
||||
|
||||
type MuxClient struct {
|
||||
cfg MuxClientArgs
|
||||
}
|
||||
|
||||
func NewMuxClient() Service {
|
||||
return &MuxClient{
|
||||
cfg: MuxClientArgs{},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *MuxClient) InitService() {
|
||||
|
||||
}
|
||||
|
||||
func (s *MuxClient) CheckArgs() {
|
||||
if *s.cfg.Parent != "" {
|
||||
log.Printf("use tls parent %s", *s.cfg.Parent)
|
||||
} else {
|
||||
log.Fatalf("parent required")
|
||||
}
|
||||
if *s.cfg.CertFile == "" || *s.cfg.KeyFile == "" {
|
||||
log.Fatalf("cert and key file required")
|
||||
}
|
||||
s.cfg.CertBytes, s.cfg.KeyBytes = utils.TlsBytes(*s.cfg.CertFile, *s.cfg.KeyFile)
|
||||
}
|
||||
func (s *MuxClient) StopService() {
|
||||
|
||||
}
|
||||
func (s *MuxClient) Start(args interface{}) (err error) {
|
||||
s.cfg = args.(MuxClientArgs)
|
||||
s.CheckArgs()
|
||||
s.InitService()
|
||||
log.Printf("proxy on mux client mode, compress %v", *s.cfg.IsCompress)
|
||||
for {
|
||||
var _conn tls.Conn
|
||||
_conn, err = utils.TlsConnectHost(*s.cfg.Parent, *s.cfg.Timeout, s.cfg.CertBytes, s.cfg.KeyBytes)
|
||||
if err != nil {
|
||||
log.Printf("connection err: %s, retrying...", err)
|
||||
time.Sleep(time.Second * 3)
|
||||
continue
|
||||
}
|
||||
conn := net.Conn(&_conn)
|
||||
_, err = conn.Write(utils.BuildPacket(CONN_CLIENT, *s.cfg.Key))
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
log.Printf("connection err: %s, retrying...", err)
|
||||
time.Sleep(time.Second * 3)
|
||||
continue
|
||||
}
|
||||
session, err := smux.Server(conn, nil)
|
||||
if err != nil {
|
||||
log.Printf("session err: %s, retrying...", err)
|
||||
conn.Close()
|
||||
time.Sleep(time.Second * 3)
|
||||
continue
|
||||
}
|
||||
for {
|
||||
stream, err := session.AcceptStream()
|
||||
if err != nil {
|
||||
log.Printf("accept stream err: %s, retrying...", err)
|
||||
session.Close()
|
||||
time.Sleep(time.Second * 3)
|
||||
break
|
||||
}
|
||||
go func() {
|
||||
var ID, clientLocalAddr, serverID string
|
||||
err = utils.ReadPacketData(stream, &ID, &clientLocalAddr, &serverID)
|
||||
if err != nil {
|
||||
log.Printf("read stream signal err: %s", err)
|
||||
stream.Close()
|
||||
return
|
||||
}
|
||||
log.Printf("signal revecived,server %s stream %s %s", serverID, ID, clientLocalAddr)
|
||||
protocol := clientLocalAddr[:3]
|
||||
localAddr := clientLocalAddr[4:]
|
||||
if protocol == "udp" {
|
||||
s.ServeUDP(stream, localAddr, ID)
|
||||
} else {
|
||||
s.ServeConn(stream, localAddr, ID)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
func (s *MuxClient) Clean() {
|
||||
s.StopService()
|
||||
}
|
||||
|
||||
func (s *MuxClient) ServeUDP(inConn *smux.Stream, localAddr, ID string) {
|
||||
|
||||
for {
|
||||
srcAddr, body, err := utils.ReadUDPPacket(inConn)
|
||||
if err != nil {
|
||||
log.Printf("udp packet revecived fail, err: %s", err)
|
||||
log.Printf("connection %s released", ID)
|
||||
inConn.Close()
|
||||
break
|
||||
} else {
|
||||
//log.Printf("udp packet revecived:%s,%v", srcAddr, body)
|
||||
go s.processUDPPacket(inConn, srcAddr, localAddr, body)
|
||||
}
|
||||
|
||||
}
|
||||
// }
|
||||
}
|
||||
func (s *MuxClient) processUDPPacket(inConn *smux.Stream, srcAddr, localAddr string, body []byte) {
|
||||
dstAddr, err := net.ResolveUDPAddr("udp", localAddr)
|
||||
if err != nil {
|
||||
log.Printf("can't resolve address: %s", err)
|
||||
inConn.Close()
|
||||
return
|
||||
}
|
||||
clientSrcAddr := &net.UDPAddr{IP: net.IPv4zero, Port: 0}
|
||||
conn, err := net.DialUDP("udp", clientSrcAddr, dstAddr)
|
||||
if err != nil {
|
||||
log.Printf("connect to udp %s fail,ERR:%s", dstAddr.String(), err)
|
||||
return
|
||||
}
|
||||
conn.SetDeadline(time.Now().Add(time.Millisecond * time.Duration(*s.cfg.Timeout)))
|
||||
_, err = conn.Write(body)
|
||||
if err != nil {
|
||||
log.Printf("send udp packet to %s fail,ERR:%s", dstAddr.String(), err)
|
||||
return
|
||||
}
|
||||
//log.Printf("send udp packet to %s success", dstAddr.String())
|
||||
buf := make([]byte, 1024)
|
||||
length, _, err := conn.ReadFromUDP(buf)
|
||||
if err != nil {
|
||||
log.Printf("read udp response from %s fail ,ERR:%s", dstAddr.String(), err)
|
||||
return
|
||||
}
|
||||
respBody := buf[0:length]
|
||||
//log.Printf("revecived udp packet from %s , %v", dstAddr.String(), respBody)
|
||||
bs := utils.UDPPacket(srcAddr, respBody)
|
||||
_, err = (*inConn).Write(bs)
|
||||
if err != nil {
|
||||
log.Printf("send udp response fail ,ERR:%s", err)
|
||||
inConn.Close()
|
||||
return
|
||||
}
|
||||
//log.Printf("send udp response success ,from:%s ,%d ,%v", dstAddr.String(), len(bs), bs)
|
||||
}
|
||||
func (s *MuxClient) ServeConn(inConn *smux.Stream, localAddr, ID string) {
|
||||
var err error
|
||||
var outConn net.Conn
|
||||
i := 0
|
||||
for {
|
||||
i++
|
||||
outConn, err = utils.ConnectHost(localAddr, *s.cfg.Timeout)
|
||||
if err == nil || i == 3 {
|
||||
break
|
||||
} else {
|
||||
if i == 3 {
|
||||
log.Printf("connect to %s err: %s, retrying...", localAddr, err)
|
||||
time.Sleep(2 * time.Second)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
inConn.Close()
|
||||
utils.CloseConn(&outConn)
|
||||
log.Printf("build connection error, err: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("stream %s created", ID)
|
||||
if *s.cfg.IsCompress {
|
||||
die1 := make(chan bool, 1)
|
||||
die2 := make(chan bool, 1)
|
||||
go func() {
|
||||
io.Copy(outConn, snappy.NewReader(inConn))
|
||||
die1 <- true
|
||||
}()
|
||||
go func() {
|
||||
io.Copy(snappy.NewWriter(inConn), outConn)
|
||||
die2 <- true
|
||||
}()
|
||||
select {
|
||||
case <-die1:
|
||||
case <-die2:
|
||||
}
|
||||
outConn.Close()
|
||||
inConn.Close()
|
||||
log.Printf("%s stream %s released", *s.cfg.Key, ID)
|
||||
} else {
|
||||
utils.IoBind(inConn, outConn, func(err interface{}) {
|
||||
log.Printf("stream %s released", ID)
|
||||
})
|
||||
}
|
||||
}
|
||||
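When IsCompress is set, ServeConn above wraps each stream in snappy framing with snappy.NewReader and snappy.NewWriter. A self-contained round trip over an in-memory buffer shows the same pairing (the payload string is an example):

package main

import (
    "bytes"
    "fmt"
    "io"

    "github.com/golang/snappy"
)

// The same snappy.NewWriter / snappy.NewReader pairing used for compressed
// streams, demonstrated on a buffer instead of a network connection.
func main() {
    var wire bytes.Buffer
    w := snappy.NewWriter(&wire)
    if _, err := w.Write([]byte("hello over a compressed stream")); err != nil {
        panic(err)
    }
    out, err := io.ReadAll(snappy.NewReader(&wire))
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}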
services/mux_server.go (new file, 333 lines)
@@ -0,0 +1,333 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"proxy/utils"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
"github.com/xtaci/smux"
|
||||
)
|
||||
|
||||
type MuxServer struct {
|
||||
cfg MuxServerArgs
|
||||
udpChn chan MuxUDPItem
|
||||
sc utils.ServerChannel
|
||||
session *smux.Session
|
||||
lockChn chan bool
|
||||
}
|
||||
|
||||
type MuxServerManager struct {
|
||||
cfg MuxServerArgs
|
||||
udpChn chan MuxUDPItem
|
||||
sc utils.ServerChannel
|
||||
serverID string
|
||||
}
|
||||
|
||||
func NewMuxServerManager() Service {
|
||||
return &MuxServerManager{
|
||||
cfg: MuxServerArgs{},
|
||||
udpChn: make(chan MuxUDPItem, 50000),
|
||||
serverID: utils.Uniqueid(),
|
||||
}
|
||||
}
|
||||
func (s *MuxServerManager) Start(args interface{}) (err error) {
|
||||
s.cfg = args.(MuxServerArgs)
|
||||
s.CheckArgs()
|
||||
if *s.cfg.Parent != "" {
|
||||
log.Printf("use tls parent %s", *s.cfg.Parent)
|
||||
} else {
|
||||
log.Fatalf("parent required")
|
||||
}
|
||||
|
||||
s.InitService()
|
||||
|
||||
log.Printf("server id: %s", s.serverID)
|
||||
//log.Printf("route:%v", *s.cfg.Route)
|
||||
for _, _info := range *s.cfg.Route {
|
||||
if _info == "" {
|
||||
continue
|
||||
}
|
||||
IsUDP := *s.cfg.IsUDP
|
||||
if strings.HasPrefix(_info, "udp://") {
|
||||
IsUDP = true
|
||||
}
|
||||
info := strings.TrimPrefix(_info, "udp://")
|
||||
info = strings.TrimPrefix(info, "tcp://")
|
||||
_routeInfo := strings.Split(info, "@")
|
||||
server := NewMuxServer()
|
||||
local := _routeInfo[0]
|
||||
remote := _routeInfo[1]
|
||||
KEY := *s.cfg.Key
|
||||
if strings.HasPrefix(remote, "[") {
|
||||
KEY = remote[1:strings.LastIndex(remote, "]")]
|
||||
remote = remote[strings.LastIndex(remote, "]")+1:]
|
||||
}
|
||||
if strings.HasPrefix(remote, ":") {
|
||||
remote = fmt.Sprintf("127.0.0.1%s", remote)
|
||||
}
|
||||
err = server.Start(MuxServerArgs{
|
||||
CertBytes: s.cfg.CertBytes,
|
||||
KeyBytes: s.cfg.KeyBytes,
|
||||
Parent: s.cfg.Parent,
|
||||
CertFile: s.cfg.CertFile,
|
||||
KeyFile: s.cfg.KeyFile,
|
||||
Local: &local,
|
||||
IsUDP: &IsUDP,
|
||||
Remote: &remote,
|
||||
Key: &KEY,
|
||||
Timeout: s.cfg.Timeout,
|
||||
Mgr: s,
|
||||
IsCompress: s.cfg.IsCompress,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
func (s *MuxServerManager) Clean() {
|
||||
s.StopService()
|
||||
}
|
||||
func (s *MuxServerManager) StopService() {
|
||||
}
|
||||
func (s *MuxServerManager) CheckArgs() {
|
||||
if *s.cfg.CertFile == "" || *s.cfg.KeyFile == "" {
|
||||
log.Fatalf("cert and key file required")
|
||||
}
|
||||
s.cfg.CertBytes, s.cfg.KeyBytes = utils.TlsBytes(*s.cfg.CertFile, *s.cfg.KeyFile)
|
||||
}
|
||||
func (s *MuxServerManager) InitService() {
|
||||
}
|
||||
|
||||
func NewMuxServer() Service {
|
||||
return &MuxServer{
|
||||
cfg: MuxServerArgs{},
|
||||
udpChn: make(chan MuxUDPItem, 50000),
|
||||
lockChn: make(chan bool, 1),
|
||||
}
|
||||
}
|
||||
|
||||
type MuxUDPItem struct {
|
||||
packet *[]byte
|
||||
localAddr *net.UDPAddr
|
||||
srcAddr *net.UDPAddr
|
||||
}
|
||||
|
||||
func (s *MuxServer) InitService() {
|
||||
s.UDPConnDeamon()
|
||||
}
|
||||
func (s *MuxServer) CheckArgs() {
|
||||
if *s.cfg.Remote == "" {
|
||||
log.Fatalf("remote required")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *MuxServer) Start(args interface{}) (err error) {
|
||||
s.cfg = args.(MuxServerArgs)
|
||||
s.CheckArgs()
|
||||
s.InitService()
|
||||
host, port, _ := net.SplitHostPort(*s.cfg.Local)
|
||||
p, _ := strconv.Atoi(port)
|
||||
s.sc = utils.NewServerChannel(host, p)
|
||||
if *s.cfg.IsUDP {
|
||||
err = s.sc.ListenUDP(func(packet []byte, localAddr, srcAddr *net.UDPAddr) {
|
||||
s.udpChn <- MuxUDPItem{
|
||||
packet: &packet,
|
||||
localAddr: localAddr,
|
||||
srcAddr: srcAddr,
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
log.Printf("proxy on udp mux server mode %s", (*s.sc.UDPListener).LocalAddr())
|
||||
} else {
|
||||
err = s.sc.ListenTCP(func(inConn net.Conn) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
log.Printf("connection handler crashed with err : %s \nstack: %s", err, string(debug.Stack()))
|
||||
}
|
||||
}()
|
||||
var outConn net.Conn
|
||||
var ID string
|
||||
for {
|
||||
outConn, ID, err = s.GetOutConn()
|
||||
if err != nil {
|
||||
utils.CloseConn(&outConn)
|
||||
log.Printf("connect to %s fail, err: %s, retrying...", *s.cfg.Parent, err)
|
||||
time.Sleep(time.Second * 3)
|
||||
continue
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
log.Printf("%s stream %s created", *s.cfg.Key, ID)
|
||||
if *s.cfg.IsCompress {
|
||||
die1 := make(chan bool, 1)
|
||||
die2 := make(chan bool, 1)
|
||||
go func() {
|
||||
io.Copy(inConn, snappy.NewReader(outConn))
|
||||
die1 <- true
|
||||
}()
|
||||
go func() {
|
||||
io.Copy(snappy.NewWriter(outConn), inConn)
|
||||
die2 <- true
|
||||
}()
|
||||
select {
|
||||
case <-die1:
|
||||
case <-die2:
|
||||
}
|
||||
outConn.Close()
|
||||
inConn.Close()
|
||||
log.Printf("%s stream %s released", *s.cfg.Key, ID)
|
||||
} else {
|
||||
utils.IoBind(inConn, outConn, func(err interface{}) {
|
||||
log.Printf("%s stream %s released", *s.cfg.Key, ID)
|
||||
})
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
log.Printf("proxy on mux server mode %s, compress %v", (*s.sc.Listener).Addr(), *s.cfg.IsCompress)
|
||||
}
|
||||
return
|
||||
}
|
||||
func (s *MuxServer) Clean() {
|
||||
|
||||
}
|
||||
func (s *MuxServer) GetOutConn() (outConn net.Conn, ID string, err error) {
|
||||
outConn, err = s.GetConn()
|
||||
if err != nil {
|
||||
log.Printf("connection err: %s", err)
|
||||
return
|
||||
}
|
||||
remoteAddr := "tcp:" + *s.cfg.Remote
|
||||
if *s.cfg.IsUDP {
|
||||
remoteAddr = "udp:" + *s.cfg.Remote
|
||||
}
|
||||
ID = utils.Uniqueid()
|
||||
_, err = outConn.Write(utils.BuildPacketData(ID, remoteAddr, s.cfg.Mgr.serverID))
|
||||
if err != nil {
|
||||
log.Printf("write stream data err: %s ,retrying...", err)
|
||||
utils.CloseConn(&outConn)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
func (s *MuxServer) GetConn() (conn net.Conn, err error) {
|
||||
select {
|
||||
case s.lockChn <- true:
|
||||
default:
|
||||
err = fmt.Errorf("can not connect at same time")
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
<-s.lockChn
|
||||
}()
|
||||
if s.session == nil {
|
||||
var _conn tls.Conn
|
||||
_conn, err = utils.TlsConnectHost(*s.cfg.Parent, *s.cfg.Timeout, s.cfg.CertBytes, s.cfg.KeyBytes)
|
||||
if err != nil {
|
||||
s.session = nil
|
||||
return
|
||||
}
|
||||
c := net.Conn(&_conn)
|
||||
_, err = c.Write(utils.BuildPacket(CONN_SERVER, *s.cfg.Key, s.cfg.Mgr.serverID))
|
||||
if err != nil {
|
||||
c.Close()
|
||||
s.session = nil
|
||||
return
|
||||
}
|
||||
if err == nil {
|
||||
s.session, err = smux.Client(c, nil)
|
||||
if err != nil {
|
||||
s.session = nil
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
conn, err = s.session.OpenStream()
|
||||
if err != nil {
|
||||
s.session.Close()
|
||||
s.session = nil
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
func (s *MuxServer) UDPConnDeamon() {
|
||||
go func() {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
log.Printf("udp conn deamon crashed with err : %s \nstack: %s", err, string(debug.Stack()))
|
||||
}
|
||||
}()
|
||||
var outConn net.Conn
|
||||
var ID string
|
||||
var err error
|
||||
for {
|
||||
item := <-s.udpChn
|
||||
RETRY:
|
||||
if outConn == nil {
|
||||
for {
|
||||
outConn, ID, err = s.GetOutConn()
|
||||
if err != nil {
|
||||
outConn = nil
|
||||
utils.CloseConn(&outConn)
|
||||
log.Printf("connect to %s fail, err: %s, retrying...", *s.cfg.Parent, err)
|
||||
time.Sleep(time.Second * 3)
|
||||
continue
|
||||
} else {
|
||||
go func(outConn net.Conn, ID string) {
|
||||
go func() {
|
||||
// outConn.Close()
|
||||
}()
|
||||
for {
|
||||
srcAddrFromConn, body, err := utils.ReadUDPPacket(outConn)
|
||||
if err != nil {
|
||||
log.Printf("parse revecived udp packet fail, err: %s ,%v", err, body)
|
||||
log.Printf("UDP deamon connection %s exited", ID)
|
||||
break
|
||||
}
|
||||
//log.Printf("udp packet revecived over parent , local:%s", srcAddrFromConn)
|
||||
_srcAddr := strings.Split(srcAddrFromConn, ":")
|
||||
if len(_srcAddr) != 2 {
|
||||
log.Printf("parse revecived udp packet fail, addr error : %s", srcAddrFromConn)
|
||||
continue
|
||||
}
|
||||
port, _ := strconv.Atoi(_srcAddr[1])
|
||||
dstAddr := &net.UDPAddr{IP: net.ParseIP(_srcAddr[0]), Port: port}
|
||||
_, err = s.sc.UDPListener.WriteToUDP(body, dstAddr)
|
||||
if err != nil {
|
||||
log.Printf("udp response to local %s fail,ERR:%s", srcAddrFromConn, err)
|
||||
continue
|
||||
}
|
||||
//log.Printf("udp response to local %s success , %v", srcAddrFromConn, body)
|
||||
}
|
||||
}(outConn, ID)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
outConn.SetWriteDeadline(time.Now().Add(time.Second))
|
||||
_, err = outConn.Write(utils.UDPPacket(item.srcAddr.String(), *item.packet))
|
||||
outConn.SetWriteDeadline(time.Time{})
|
||||
if err != nil {
|
||||
utils.CloseConn(&outConn)
|
||||
outConn = nil
|
||||
log.Printf("write udp packet to %s fail ,flush err:%s ,retrying...", *s.cfg.Parent, err)
|
||||
goto RETRY
|
||||
}
|
||||
//log.Printf("write packet %v", *item.packet)
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -10,6 +10,7 @@ import (
    "proxy/utils/aes"
    "proxy/utils/socks"
    "runtime/debug"
    "strings"
    "time"

    "golang.org/x/crypto/ssh"
@@ -353,7 +354,8 @@ func (s *Socks) socksConnCallback(inConn net.Conn) {
    pass := string(r[2+r[1]+1:])
    //log.Printf("user:%s,pass:%s", user, pass)
    //auth
    if s.basicAuth.CheckUserPass(user, pass) {
    _addr := strings.Split(inConn.RemoteAddr().String(), ":")
    if s.basicAuth.CheckUserPass(user, pass, _addr[0], "") {
        inConn.Write([]byte{0x01, 0x00})
    } else {
        inConn.Write([]byte{0x01, 0x01})
@@ -414,8 +416,14 @@ func (s *Socks) proxyTCP(inConn *net.Conn, methodReq socks.MethodsRequest, reque
        outConn, err = s.getOutConn(methodReq.Bytes(), request.Bytes(), request.Addr())
    } else {
        if *s.cfg.Parent != "" {
            s.checker.Add(request.Addr(), true, "", "", nil)
            useProxy, _, _ = s.checker.IsBlocked(request.Addr())
            host, _, _ := net.SplitHostPort(request.Addr())
            useProxy := false
            if utils.IsIternalIP(host) {
                useProxy = false
            } else {
                s.checker.Add(request.Addr())
                useProxy, _, _ = s.checker.IsBlocked(request.Addr())
            }
        if useProxy {
            outConn, err = s.getOutConn(methodReq.Bytes(), request.Bytes(), request.Addr())
        } else {
@@ -490,11 +498,11 @@ func (s *Socks) getOutConn(methodBytes, reqBytes []byte, host string) (outConn n
        err = fmt.Errorf("write req detail fail,%s", err)
        return
    }
    // _, err = outConn.Read(buf)
    // if err != nil {
    // err = fmt.Errorf("read req reply fail,%s", err)
    // return
    // }
    _, err = outConn.Read(buf)
    if err != nil {
        err = fmt.Errorf("read req reply fail,%s", err)
        return
    }
    //result := buf[:n]
    //log.Printf("result:%v", result)

@@ -560,6 +568,10 @@ func (s *Socks) ConnectSSH() (err error) {
    }
}
func (s *Socks) InitBasicAuth() (err error) {
    s.basicAuth = utils.NewBasicAuth()
    if *s.cfg.AuthURL != "" {
        s.basicAuth.SetAuthURL(*s.cfg.AuthURL, *s.cfg.AuthURLOkCode, *s.cfg.AuthURLTimeout, *s.cfg.AuthURLRetry)
        log.Printf("auth from %s", *s.cfg.AuthURL)
    }
    if *s.cfg.AuthFile != "" {
        var n = 0
        n, err = s.basicAuth.AddFromFile(*s.cfg.AuthFile)
@@ -576,7 +588,7 @@ func (s *Socks) InitBasicAuth() (err error) {
    return
}
func (s *Socks) IsBasicAuth() bool {
    return *s.cfg.AuthFile != "" || len(*s.cfg.Auth) > 0
    return *s.cfg.AuthFile != "" || len(*s.cfg.Auth) > 0 || *s.cfg.AuthURL != ""
}
func (s *Socks) IsDeadLoop(inLocalAddr string, host string) bool {
    inIP, inPort, err := net.SplitHostPort(inLocalAddr)

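The inConn.Write([]byte{0x01, 0x00}) / {0x01, 0x01} pair in socksConnCallback is the RFC 1929 username/password sub-negotiation reply: version byte 0x01 followed by 0x00 for success or any non-zero status for failure. For reference:

package main

import "fmt"

// RFC 1929 sub-negotiation reply, matching the two Write calls above.
func authReply(ok bool) []byte {
    if ok {
        return []byte{0x01, 0x00}
    }
    return []byte{0x01, 0x01}
}

func main() {
    fmt.Printf("% x\n", authReply(true))  // 01 00
    fmt.Printf("% x\n", authReply(false)) // 01 01
}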
@ -17,8 +17,8 @@ type TunnelBridge struct {
|
||||
cfg TunnelBridgeArgs
|
||||
serverConns utils.ConcurrentMap
|
||||
clientControlConns utils.ConcurrentMap
|
||||
cmServer utils.ConnManager
|
||||
cmClient utils.ConnManager
|
||||
// cmServer utils.ConnManager
|
||||
// cmClient utils.ConnManager
|
||||
}
|
||||
|
||||
func NewTunnelBridge() Service {
|
||||
@ -26,8 +26,8 @@ func NewTunnelBridge() Service {
|
||||
cfg: TunnelBridgeArgs{},
|
||||
serverConns: utils.NewConcurrentMap(),
|
||||
clientControlConns: utils.NewConcurrentMap(),
|
||||
cmServer: utils.NewConnManager(),
|
||||
cmClient: utils.NewConnManager(),
|
||||
// cmServer: utils.NewConnManager(),
|
||||
// cmClient: utils.NewConnManager(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -92,7 +92,7 @@ func (s *TunnelBridge) Start(args interface{}) (err error) {
|
||||
time.Sleep(time.Second * 3)
|
||||
continue
|
||||
} else {
|
||||
s.cmServer.Add(serverID, ID, &inConn)
|
||||
// s.cmServer.Add(serverID, ID, &inConn)
|
||||
break
|
||||
}
|
||||
}
|
||||
@ -114,11 +114,11 @@ func (s *TunnelBridge) Start(args interface{}) (err error) {
|
||||
serverConn := serverConnItem.(ServerConn).Conn
|
||||
utils.IoBind(*serverConn, inConn, func(err interface{}) {
|
||||
s.serverConns.Remove(ID)
|
||||
s.cmClient.RemoveOne(key, ID)
|
||||
s.cmServer.RemoveOne(serverID, ID)
|
||||
// s.cmClient.RemoveOne(key, ID)
|
||||
// s.cmServer.RemoveOne(serverID, ID)
|
||||
log.Printf("conn %s released", ID)
|
||||
})
|
||||
s.cmClient.Add(key, ID, &inConn)
|
||||
// s.cmClient.Add(key, ID, &inConn)
|
||||
log.Printf("conn %s created", ID)
|
||||
|
||||
case CONN_CLIENT_CONTROL:
|
||||
@ -136,101 +136,101 @@ func (s *TunnelBridge) Start(args interface{}) (err error) {
|
||||
s.clientControlConns.Set(key, &inConn)
|
||||
log.Printf("set client %s control conn", key)
|
||||
|
||||
case CONN_SERVER_HEARBEAT:
|
||||
var serverID string
|
||||
err = utils.ReadPacketData(reader, &serverID)
|
||||
if err != nil {
|
||||
log.Printf("read error,ERR:%s", err)
|
||||
return
|
||||
}
|
||||
log.Printf("server heartbeat connection, id: %s", serverID)
|
||||
writeDie := make(chan bool)
|
||||
readDie := make(chan bool)
|
||||
go func() {
|
||||
for {
|
||||
inConn.SetWriteDeadline(time.Now().Add(time.Second * 3))
|
||||
_, err = inConn.Write([]byte{0x00})
|
||||
inConn.SetWriteDeadline(time.Time{})
|
||||
if err != nil {
|
||||
log.Printf("server heartbeat connection write err %s", err)
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second * 3)
|
||||
}
|
||||
close(writeDie)
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
signal := make([]byte, 1)
|
||||
inConn.SetReadDeadline(time.Now().Add(time.Second * 6))
|
||||
_, err := inConn.Read(signal)
|
||||
inConn.SetReadDeadline(time.Time{})
|
||||
if err != nil {
|
||||
log.Printf("server heartbeat connection read err: %s", err)
|
||||
break
|
||||
} else {
|
||||
// log.Printf("heartbeat from server ,id:%s", serverID)
|
||||
}
|
||||
}
|
||||
close(readDie)
|
||||
}()
|
||||
select {
|
||||
case <-readDie:
|
||||
case <-writeDie:
|
||||
}
|
||||
utils.CloseConn(&inConn)
|
||||
s.cmServer.Remove(serverID)
|
||||
log.Printf("server heartbeat conn %s released", serverID)
|
||||
case CONN_CLIENT_HEARBEAT:
|
||||
var clientID string
|
||||
err = utils.ReadPacketData(reader, &clientID)
|
||||
if err != nil {
|
||||
log.Printf("read error,ERR:%s", err)
|
||||
return
|
||||
}
|
||||
log.Printf("client heartbeat connection, id: %s", clientID)
|
||||
writeDie := make(chan bool)
|
||||
readDie := make(chan bool)
|
||||
go func() {
|
||||
for {
|
||||
inConn.SetWriteDeadline(time.Now().Add(time.Second * 3))
|
||||
_, err = inConn.Write([]byte{0x00})
|
||||
inConn.SetWriteDeadline(time.Time{})
|
||||
if err != nil {
|
||||
log.Printf("client heartbeat connection write err %s", err)
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second * 3)
|
||||
}
|
||||
close(writeDie)
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
signal := make([]byte, 1)
|
||||
inConn.SetReadDeadline(time.Now().Add(time.Second * 6))
|
||||
_, err := inConn.Read(signal)
|
||||
inConn.SetReadDeadline(time.Time{})
|
||||
if err != nil {
|
||||
log.Printf("client control connection read err: %s", err)
|
||||
break
|
||||
} else {
|
||||
// log.Printf("heartbeat from client ,id:%s", clientID)
|
||||
}
|
||||
}
|
||||
close(readDie)
|
||||
}()
|
||||
select {
|
||||
case <-readDie:
|
||||
case <-writeDie:
|
||||
}
|
||||
utils.CloseConn(&inConn)
|
||||
s.cmClient.Remove(clientID)
|
||||
if s.clientControlConns.Has(clientID) {
|
||||
item, _ := s.clientControlConns.Get(clientID)
|
||||
(*item.(*net.Conn)).Close()
|
||||
}
|
||||
s.clientControlConns.Remove(clientID)
|
||||
log.Printf("client heartbeat conn %s released", clientID)
|
||||
// case CONN_SERVER_HEARBEAT:
|
||||
// var serverID string
|
||||
// err = utils.ReadPacketData(reader, &serverID)
|
||||
// if err != nil {
|
||||
// log.Printf("read error,ERR:%s", err)
|
||||
// return
|
||||
// }
|
||||
// log.Printf("server heartbeat connection, id: %s", serverID)
|
||||
// writeDie := make(chan bool)
|
||||
// readDie := make(chan bool)
|
||||
// go func() {
|
||||
// for {
|
||||
// inConn.SetWriteDeadline(time.Now().Add(time.Second * 3))
|
||||
// _, err = inConn.Write([]byte{0x00})
|
||||
// inConn.SetWriteDeadline(time.Time{})
|
||||
// if err != nil {
|
||||
// log.Printf("server heartbeat connection write err %s", err)
|
||||
// break
|
||||
// }
|
||||
// time.Sleep(time.Second * 3)
|
||||
// }
|
||||
// close(writeDie)
|
||||
// }()
|
||||
// go func() {
|
||||
// for {
|
||||
// signal := make([]byte, 1)
|
||||
// inConn.SetReadDeadline(time.Now().Add(time.Second * 6))
|
||||
// _, err := inConn.Read(signal)
|
||||
// inConn.SetReadDeadline(time.Time{})
|
||||
// if err != nil {
|
||||
// log.Printf("server heartbeat connection read err: %s", err)
|
||||
// break
|
||||
// } else {
|
||||
// // log.Printf("heartbeat from server ,id:%s", serverID)
|
||||
// }
|
||||
// }
|
||||
// close(readDie)
|
||||
// }()
|
||||
// select {
|
||||
// case <-readDie:
|
||||
// case <-writeDie:
|
||||
// }
|
||||
// utils.CloseConn(&inConn)
|
||||
// s.cmServer.Remove(serverID)
|
||||
// log.Printf("server heartbeat conn %s released", serverID)
|
||||
// case CONN_CLIENT_HEARBEAT:
|
||||
// var clientID string
|
||||
// err = utils.ReadPacketData(reader, &clientID)
|
||||
// if err != nil {
|
||||
// log.Printf("read error,ERR:%s", err)
|
||||
// return
|
||||
// }
|
||||
// log.Printf("client heartbeat connection, id: %s", clientID)
|
||||
// writeDie := make(chan bool)
|
||||
// readDie := make(chan bool)
|
||||
// go func() {
|
||||
// for {
|
||||
// inConn.SetWriteDeadline(time.Now().Add(time.Second * 3))
|
||||
// _, err = inConn.Write([]byte{0x00})
|
||||
// inConn.SetWriteDeadline(time.Time{})
|
||||
// if err != nil {
|
||||
// log.Printf("client heartbeat connection write err %s", err)
|
||||
// break
|
||||
// }
|
||||
// time.Sleep(time.Second * 3)
|
||||
// }
|
||||
// close(writeDie)
|
||||
// }()
|
||||
// go func() {
|
||||
// for {
|
||||
// signal := make([]byte, 1)
|
||||
// inConn.SetReadDeadline(time.Now().Add(time.Second * 6))
|
||||
// _, err := inConn.Read(signal)
|
||||
// inConn.SetReadDeadline(time.Time{})
|
||||
// if err != nil {
|
||||
// log.Printf("client control connection read err: %s", err)
|
||||
// break
|
||||
// } else {
|
||||
// // log.Printf("heartbeat from client ,id:%s", clientID)
|
||||
// }
|
||||
// }
|
||||
// close(readDie)
|
||||
// }()
|
||||
// select {
|
||||
// case <-readDie:
|
||||
// case <-writeDie:
|
||||
// }
|
||||
// utils.CloseConn(&inConn)
|
||||
// s.cmClient.Remove(clientID)
|
||||
// if s.clientControlConns.Has(clientID) {
|
||||
// item, _ := s.clientControlConns.Get(clientID)
|
||||
// (*item.(*net.Conn)).Close()
|
||||
// }
|
||||
// s.clientControlConns.Remove(clientID)
|
||||
// log.Printf("client heartbeat conn %s released", clientID)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@ -11,79 +11,80 @@ import (
|
||||
)
|
||||
|
||||
type TunnelClient struct {
|
||||
cfg TunnelClientArgs
|
||||
cm utils.ConnManager
|
||||
cfg TunnelClientArgs
|
||||
// cm utils.ConnManager
|
||||
ctrlConn net.Conn
|
||||
}
|
||||
|
||||
func NewTunnelClient() Service {
|
||||
return &TunnelClient{
|
||||
cfg: TunnelClientArgs{},
|
||||
cm: utils.NewConnManager(),
|
||||
// cm: utils.NewConnManager(),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *TunnelClient) InitService() {
|
||||
s.InitHeartbeatDeamon()
|
||||
// s.InitHeartbeatDeamon()
|
||||
}
|
||||
func (s *TunnelClient) InitHeartbeatDeamon() {
|
||||
log.Printf("heartbeat started")
|
||||
go func() {
|
||||
var heartbeatConn net.Conn
|
||||
var ID = *s.cfg.Key
|
||||
for {
|
||||
|
||||
//close all connection
|
||||
s.cm.RemoveAll()
|
||||
if s.ctrlConn != nil {
|
||||
s.ctrlConn.Close()
|
||||
}
|
||||
utils.CloseConn(&heartbeatConn)
|
||||
heartbeatConn, err := s.GetInConn(CONN_CLIENT_HEARBEAT, ID)
|
||||
if err != nil {
|
||||
log.Printf("heartbeat connection err: %s, retrying...", err)
|
||||
time.Sleep(time.Second * 3)
|
||||
utils.CloseConn(&heartbeatConn)
|
||||
continue
|
||||
}
|
||||
log.Printf("heartbeat connection created,id:%s", ID)
|
||||
writeDie := make(chan bool)
|
||||
readDie := make(chan bool)
|
||||
go func() {
|
||||
for {
|
||||
heartbeatConn.SetWriteDeadline(time.Now().Add(time.Second * 3))
|
||||
_, err = heartbeatConn.Write([]byte{0x00})
|
||||
heartbeatConn.SetWriteDeadline(time.Time{})
|
||||
if err != nil {
|
||||
log.Printf("heartbeat connection write err %s", err)
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second * 3)
|
||||
}
|
||||
close(writeDie)
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
signal := make([]byte, 1)
|
||||
heartbeatConn.SetReadDeadline(time.Now().Add(time.Second * 6))
|
||||
_, err := heartbeatConn.Read(signal)
|
||||
heartbeatConn.SetReadDeadline(time.Time{})
|
||||
if err != nil {
|
||||
log.Printf("heartbeat connection read err: %s", err)
|
||||
break
|
||||
} else {
|
||||
//log.Printf("heartbeat from bridge")
|
||||
}
|
||||
}
|
||||
close(readDie)
|
||||
}()
|
||||
select {
|
||||
case <-readDie:
|
||||
case <-writeDie:
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
// func (s *TunnelClient) InitHeartbeatDeamon() {
|
||||
// log.Printf("heartbeat started")
|
||||
// go func() {
|
||||
// var heartbeatConn net.Conn
|
||||
// var ID = *s.cfg.Key
|
||||
// for {
|
||||
|
||||
// //close all connection
|
||||
// s.cm.RemoveAll()
|
||||
// if s.ctrlConn != nil {
|
||||
// s.ctrlConn.Close()
|
||||
// }
|
||||
// utils.CloseConn(&heartbeatConn)
|
||||
// heartbeatConn, err := s.GetInConn(CONN_CLIENT_HEARBEAT, ID)
|
||||
// if err != nil {
|
||||
// log.Printf("heartbeat connection err: %s, retrying...", err)
|
||||
// time.Sleep(time.Second * 3)
|
||||
// utils.CloseConn(&heartbeatConn)
|
||||
// continue
|
||||
// }
|
||||
// log.Printf("heartbeat connection created,id:%s", ID)
|
||||
// writeDie := make(chan bool)
|
||||
// readDie := make(chan bool)
|
||||
// go func() {
|
||||
// for {
|
||||
// heartbeatConn.SetWriteDeadline(time.Now().Add(time.Second * 3))
|
||||
// _, err = heartbeatConn.Write([]byte{0x00})
|
||||
// heartbeatConn.SetWriteDeadline(time.Time{})
|
||||
// if err != nil {
|
||||
// log.Printf("heartbeat connection write err %s", err)
|
||||
// break
|
||||
// }
|
||||
// time.Sleep(time.Second * 3)
|
||||
// }
|
||||
// close(writeDie)
|
||||
// }()
|
||||
// go func() {
|
||||
// for {
|
||||
// signal := make([]byte, 1)
|
||||
// heartbeatConn.SetReadDeadline(time.Now().Add(time.Second * 6))
|
||||
// _, err := heartbeatConn.Read(signal)
|
||||
// heartbeatConn.SetReadDeadline(time.Time{})
|
||||
// if err != nil {
|
||||
// log.Printf("heartbeat connection read err: %s", err)
|
||||
// break
|
||||
// } else {
|
||||
// //log.Printf("heartbeat from bridge")
|
||||
// }
|
||||
// }
|
||||
// close(readDie)
|
||||
// }()
|
||||
// select {
|
||||
// case <-readDie:
|
||||
// case <-writeDie:
|
||||
// }
|
||||
// }
|
||||
// }()
|
||||
// }
|
||||
func (s *TunnelClient) CheckArgs() {
|
||||
if *s.cfg.Parent != "" {
|
||||
log.Printf("use tls parent %s", *s.cfg.Parent)
|
||||
@ -96,7 +97,7 @@ func (s *TunnelClient) CheckArgs() {
|
||||
s.cfg.CertBytes, s.cfg.KeyBytes = utils.TlsBytes(*s.cfg.CertFile, *s.cfg.KeyFile)
|
||||
}
|
||||
func (s *TunnelClient) StopService() {
|
||||
s.cm.RemoveAll()
|
||||
// s.cm.RemoveAll()
|
||||
}
|
||||
func (s *TunnelClient) Start(args interface{}) (err error) {
|
||||
s.cfg = args.(TunnelClientArgs)
|
||||
@ -106,7 +107,7 @@ func (s *TunnelClient) Start(args interface{}) (err error) {
|
||||
|
||||
for {
|
||||
//close all conn
|
||||
s.cm.Remove(*s.cfg.Key)
|
||||
// s.cm.Remove(*s.cfg.Key)
|
||||
if s.ctrlConn != nil {
|
||||
s.ctrlConn.Close()
|
||||
}
|
||||
@ -171,7 +172,7 @@ func (s *TunnelClient) ServeUDP(localAddr, ID, serverID string) {
|
||||
var err error
|
||||
// for {
|
||||
for {
|
||||
s.cm.RemoveOne(*s.cfg.Key, ID)
|
||||
// s.cm.RemoveOne(*s.cfg.Key, ID)
|
||||
inConn, err = s.GetInConn(CONN_CLIENT, *s.cfg.Key, ID, serverID)
|
||||
if err != nil {
|
||||
utils.CloseConn(&inConn)
|
||||
@ -182,7 +183,7 @@ func (s *TunnelClient) ServeUDP(localAddr, ID, serverID string) {
|
||||
break
|
||||
}
|
||||
}
|
||||
s.cm.Add(*s.cfg.Key, ID, &inConn)
|
||||
// s.cm.Add(*s.cfg.Key, ID, &inConn)
|
||||
log.Printf("conn %s created", ID)
|
||||
|
||||
for {
|
||||
@ -275,8 +276,8 @@ func (s *TunnelClient) ServeConn(localAddr, ID, serverID string) {
|
||||
}
|
||||
utils.IoBind(inConn, outConn, func(err interface{}) {
|
||||
log.Printf("conn %s released", ID)
|
||||
s.cm.RemoveOne(*s.cfg.Key, ID)
|
||||
// s.cm.RemoveOne(*s.cfg.Key, ID)
|
||||
})
|
||||
s.cm.Add(*s.cfg.Key, ID, &inConn)
|
||||
// s.cm.Add(*s.cfg.Key, ID, &inConn)
|
||||
log.Printf("conn %s created", ID)
|
||||
}
|
||||
|
||||
@ -24,7 +24,7 @@ type TunnelServerManager struct {
|
||||
udpChn chan UDPItem
|
||||
sc utils.ServerChannel
|
||||
serverID string
|
||||
cm utils.ConnManager
|
||||
// cm utils.ConnManager
|
||||
}
|
||||
|
||||
func NewTunnelServerManager() Service {
|
||||
@ -32,7 +32,7 @@ func NewTunnelServerManager() Service {
|
||||
cfg: TunnelServerArgs{},
|
||||
udpChn: make(chan UDPItem, 50000),
|
||||
serverID: utils.Uniqueid(),
|
||||
cm: utils.NewConnManager(),
|
||||
// cm: utils.NewConnManager(),
|
||||
}
|
||||
}
|
||||
func (s *TunnelServerManager) Start(args interface{}) (err error) {
|
||||
@ -91,7 +91,7 @@ func (s *TunnelServerManager) Clean() {
|
||||
s.StopService()
|
||||
}
|
||||
func (s *TunnelServerManager) StopService() {
|
||||
s.cm.RemoveAll()
|
||||
// s.cm.RemoveAll()
|
||||
}
|
||||
func (s *TunnelServerManager) CheckArgs() {
|
||||
if *s.cfg.CertFile == "" || *s.cfg.KeyFile == "" {
|
||||
@ -100,62 +100,63 @@ func (s *TunnelServerManager) CheckArgs() {
|
||||
s.cfg.CertBytes, s.cfg.KeyBytes = utils.TlsBytes(*s.cfg.CertFile, *s.cfg.KeyFile)
|
||||
}
|
||||
func (s *TunnelServerManager) InitService() {
|
||||
s.InitHeartbeatDeamon()
|
||||
}
|
||||
func (s *TunnelServerManager) InitHeartbeatDeamon() {
|
||||
log.Printf("heartbeat started")
|
||||
go func() {
|
||||
var heartbeatConn net.Conn
|
||||
var ID string
|
||||
for {
|
||||
//close all connection
|
||||
s.cm.Remove(ID)
|
||||
utils.CloseConn(&heartbeatConn)
|
||||
heartbeatConn, ID, err := s.GetOutConn(CONN_SERVER_HEARBEAT)
|
||||
if err != nil {
|
||||
log.Printf("heartbeat connection err: %s, retrying...", err)
|
||||
time.Sleep(time.Second * 3)
|
||||
utils.CloseConn(&heartbeatConn)
|
||||
continue
|
||||
}
|
||||
log.Printf("heartbeat connection created,id:%s", ID)
|
||||
writeDie := make(chan bool)
|
||||
readDie := make(chan bool)
|
||||
go func() {
|
||||
for {
|
||||
heartbeatConn.SetWriteDeadline(time.Now().Add(time.Second * 3))
|
||||
_, err = heartbeatConn.Write([]byte{0x00})
|
||||
heartbeatConn.SetWriteDeadline(time.Time{})
|
||||
if err != nil {
|
||||
log.Printf("heartbeat connection write err %s", err)
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second * 3)
|
||||
}
|
||||
close(writeDie)
|
||||
}()
|
||||
go func() {
|
||||
for {
|
||||
signal := make([]byte, 1)
|
||||
heartbeatConn.SetReadDeadline(time.Now().Add(time.Second * 6))
|
||||
_, err := heartbeatConn.Read(signal)
|
||||
heartbeatConn.SetReadDeadline(time.Time{})
|
||||
if err != nil {
|
||||
log.Printf("heartbeat connection read err: %s", err)
|
||||
break
|
||||
} else {
|
||||
// log.Printf("heartbeat from bridge")
|
||||
}
|
||||
}
|
||||
close(readDie)
|
||||
}()
|
||||
select {
|
||||
case <-readDie:
|
||||
case <-writeDie:
|
||||
}
|
||||
}
|
||||
}()
|
||||
// s.InitHeartbeatDeamon()
|
||||
}
|
||||
|
||||
// func (s *TunnelServerManager) InitHeartbeatDeamon() {
|
||||
// log.Printf("heartbeat started")
|
||||
// go func() {
|
||||
// var heartbeatConn net.Conn
|
||||
// var ID string
|
||||
// for {
|
||||
// //close all connection
|
||||
// s.cm.Remove(ID)
|
||||
// utils.CloseConn(&heartbeatConn)
|
||||
// heartbeatConn, ID, err := s.GetOutConn(CONN_SERVER_HEARBEAT)
|
||||
// if err != nil {
|
||||
// log.Printf("heartbeat connection err: %s, retrying...", err)
|
||||
// time.Sleep(time.Second * 3)
|
||||
// utils.CloseConn(&heartbeatConn)
|
||||
// continue
|
||||
// }
|
||||
// log.Printf("heartbeat connection created,id:%s", ID)
|
||||
// writeDie := make(chan bool)
|
||||
// readDie := make(chan bool)
|
||||
// go func() {
|
||||
// for {
|
||||
// heartbeatConn.SetWriteDeadline(time.Now().Add(time.Second * 3))
|
||||
// _, err = heartbeatConn.Write([]byte{0x00})
|
||||
// heartbeatConn.SetWriteDeadline(time.Time{})
|
||||
// if err != nil {
|
||||
// log.Printf("heartbeat connection write err %s", err)
|
||||
// break
|
||||
// }
|
||||
// time.Sleep(time.Second * 3)
|
||||
// }
|
||||
// close(writeDie)
|
||||
// }()
|
||||
// go func() {
|
||||
// for {
|
||||
// signal := make([]byte, 1)
|
||||
// heartbeatConn.SetReadDeadline(time.Now().Add(time.Second * 6))
|
||||
// _, err := heartbeatConn.Read(signal)
|
||||
// heartbeatConn.SetReadDeadline(time.Time{})
|
||||
// if err != nil {
|
||||
// log.Printf("heartbeat connection read err: %s", err)
|
||||
// break
|
||||
// } else {
|
||||
// // log.Printf("heartbeat from bridge")
|
||||
// }
|
||||
// }
|
||||
// close(readDie)
|
||||
// }()
|
||||
// select {
|
||||
// case <-readDie:
|
||||
// case <-writeDie:
|
||||
// }
|
||||
// }
|
||||
// }()
|
||||
// }
|
||||
func (s *TunnelServerManager) GetOutConn(typ uint8) (outConn net.Conn, ID string, err error) {
|
||||
outConn, err = s.GetConn()
|
||||
if err != nil {
|
||||
@ -241,11 +242,11 @@ func (s *TunnelServer) Start(args interface{}) (err error) {
|
||||
}
|
||||
}
|
||||
utils.IoBind(inConn, outConn, func(err interface{}) {
|
||||
s.cfg.Mgr.cm.RemoveOne(s.cfg.Mgr.serverID, ID)
|
||||
// s.cfg.Mgr.cm.RemoveOne(s.cfg.Mgr.serverID, ID)
|
||||
log.Printf("%s conn %s released", *s.cfg.Key, ID)
|
||||
})
|
||||
//add conn
|
||||
s.cfg.Mgr.cm.Add(s.cfg.Mgr.serverID, ID, &inConn)
|
||||
// s.cfg.Mgr.cm.Add(s.cfg.Mgr.serverID, ID, &inConn)
|
||||
log.Printf("%s conn %s created", *s.cfg.Key, ID)
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -1,4 +1,3 @@
#!/bin/bash
rm -rf /usr/bin/proxy
rm -rf /usr/bin/proxyd
echo "uninstall done"

@@ -12,7 +12,6 @@ import (
    "io"
    "io/ioutil"
    "log"
    "math/rand"
    "net"
    "net/http"
    "os"
@@ -20,6 +19,7 @@ import (

    "golang.org/x/crypto/pbkdf2"

    "proxy/utils/id"
    "strconv"
    "strings"
    "time"
@@ -42,7 +42,8 @@ func IoBind(dst io.ReadWriteCloser, src io.ReadWriteCloser, fn func(err interfac
                log.Printf("bind crashed %s", err)
            }
        }()
        _, err := io.Copy(dst, src)
        //_, err := io.Copy(dst, src)
        err := ioCopy(dst, src)
        e1 <- err
    }()
    go func() {
@@ -51,7 +52,8 @@ func IoBind(dst io.ReadWriteCloser, src io.ReadWriteCloser, fn func(err interfac
                log.Printf("bind crashed %s", err)
            }
        }()
        _, err := io.Copy(src, dst)
        //_, err := io.Copy(src, dst)
        err := ioCopy(src, dst)
        e2 <- err
    }()
    var err interface{}
@@ -299,9 +301,10 @@ func ReadUDPPacket(_reader io.Reader) (srcAddr string, packet []byte, err error)
    return
}
func Uniqueid() string {
    var src = rand.NewSource(time.Now().UnixNano())
    s := fmt.Sprintf("%d", src.Int63())
    return s[len(s)-5:len(s)-1] + fmt.Sprintf("%d", uint64(time.Now().UnixNano()))[8:]
    return xid.New().String()
    // var src = rand.NewSource(time.Now().UnixNano())
    // s := fmt.Sprintf("%d", src.Int63())
    // return s[len(s)-5:len(s)-1] + fmt.Sprintf("%d", uint64(time.Now().UnixNano()))[8:]
}
func ReadData(r io.Reader) (data string, err error) {
    var len uint16
@@ -428,6 +431,52 @@ func GetKCPBlock(method, key string) (block kcp.BlockCrypt) {
    }
    return
}
func HttpGet(URL string, timeout int) (body []byte, code int, err error) {
    var tr *http.Transport
    var client *http.Client
    conf := &tls.Config{
        InsecureSkipVerify: true,
    }
    if strings.Contains(URL, "https://") {
        tr = &http.Transport{TLSClientConfig: conf}
        client = &http.Client{Timeout: time.Millisecond * time.Duration(timeout), Transport: tr}
    } else {
        tr = &http.Transport{}
        client = &http.Client{Timeout: time.Millisecond * time.Duration(timeout), Transport: tr}
    }
    defer tr.CloseIdleConnections()
    resp, err := client.Get(URL)
    if err != nil {
        return
    }
    defer resp.Body.Close()
    code = resp.StatusCode
    body, err = ioutil.ReadAll(resp.Body)
    return
}
func IsIternalIP(domainOrIP string) bool {
    var outIPs []net.IP
    outIPs, err := net.LookupIP(domainOrIP)
    if err != nil {
        return false
    }
    for _, ip := range outIPs {
        if ip.IsLoopback() {
            return true
        }
        if ip.To4().Mask(net.IPv4Mask(255, 0, 0, 0)).String() == "10.0.0.0" {
            return true
        }
        if ip.To4().Mask(net.IPv4Mask(255, 0, 0, 0)).String() == "192.168.0.0" {
            return true
        }
        if ip.To4().Mask(net.IPv4Mask(255, 0, 0, 0)).String() == "172.0.0.0" {
            i, _ := strconv.Atoi(strings.Split(ip.To4().String(), ".")[1])
            return i >= 16 && i <= 31
        }
    }
    return false
}

// type sockaddr struct {
// family uint16

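IsIternalIP spells out the loopback/10.x/192.168.x/172.16-31.x tests with explicit /8 masks; note that masking with 255.0.0.0 turns a 192.168.x address into 192.0.0.0, so the 192.168 comparison as written can never match. On Go 1.17+ the same intent can be expressed with net.IP.IsPrivate, as in this alternative sketch (not the project's code):

package main

import (
    "fmt"
    "net"
)

// Alternative formulation (Go 1.17+) of the loopback / RFC 1918 test,
// shown for comparison only.
func isInternal(domainOrIP string) bool {
    ips, err := net.LookupIP(domainOrIP)
    if err != nil {
        return false
    }
    for _, ip := range ips {
        if ip.IsLoopback() || ip.IsPrivate() {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(isInternal("127.0.0.1"))   // true
    fmt.Println(isInternal("192.168.1.5")) // true
    fmt.Println(isInternal("8.8.8.8"))     // false
}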
utils/id/xid.go (new file, 264 lines)
@@ -0,0 +1,264 @@
|
||||
// Package xid is a globally unique id generator suited for web scale
|
||||
//
|
||||
// Xid is using Mongo Object ID algorithm to generate globally unique ids:
|
||||
// https://docs.mongodb.org/manual/reference/object-id/
|
||||
//
|
||||
// - 4-byte value representing the seconds since the Unix epoch,
|
||||
// - 3-byte machine identifier,
|
||||
// - 2-byte process id, and
|
||||
// - 3-byte counter, starting with a random value.
|
||||
//
|
||||
// The binary representation of the id is compatible with Mongo 12 bytes Object IDs.
|
||||
// The string representation is using base32 hex (w/o padding) for better space efficiency
|
||||
// when stored in that form (20 bytes). The hex variant of base32 is used to retain the
|
||||
// sortable property of the id.
|
||||
//
|
||||
// Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an
|
||||
// issue when transported as a string between various systems. Base36 wasn't retained either
|
||||
// because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned)
|
||||
// and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long,
|
||||
// all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`).
|
||||
//
|
||||
// UUID is 16 bytes (128 bits), snowflake is 8 bytes (64 bits), xid stands in between
|
||||
// with 12 bytes with a more compact string representation ready for the web and no
|
||||
// required configuration or central generation server.
|
||||
//
|
||||
// Features:
|
||||
//
|
||||
// - Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake
|
||||
// - Base32 hex encoded by default (16 bytes storage when transported as printable string)
|
||||
// - Non configured, you don't need set a unique machine and/or data center id
|
||||
// - K-ordered
|
||||
// - Embedded time with 1 second precision
|
||||
// - Unicity guaranted for 16,777,216 (24 bits) unique ids per second and per host/process
|
||||
//
|
||||
// Best used with xlog's RequestIDHandler (https://godoc.org/github.com/rs/xlog#RequestIDHandler).
|
||||
//
|
||||
// References:
|
||||
//
|
||||
// - http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems
|
||||
// - https://en.wikipedia.org/wiki/Universally_unique_identifier
|
||||
// - https://blog.twitter.com/2010/announcing-snowflake
|
||||
package xid
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Code inspired from mgo/bson ObjectId
|
||||
|
||||
// ID represents a unique request id
|
||||
type ID [rawLen]byte
|
||||
|
||||
const (
|
||||
encodedLen = 20 // string encoded len
|
||||
decodedLen = 15 // len after base32 decoding with the padded data
|
||||
rawLen = 12 // binary raw len
|
||||
|
||||
// encoding stores a custom version of the base32 encoding with lower case
|
||||
// letters.
|
||||
encoding = "0123456789abcdefghijklmnopqrstuv"
|
||||
)
|
||||
|
||||
// ErrInvalidID is returned when trying to unmarshal an invalid ID
|
||||
var ErrInvalidID = errors.New("xid: invalid ID")
|
||||
|
||||
// objectIDCounter is atomically incremented when generating a new ObjectId
|
||||
// using NewObjectId() function. It's used as a counter part of an id.
|
||||
// This id is initialized with a random value.
|
||||
var objectIDCounter = randInt()
|
||||
|
||||
// machineId stores machine id generated once and used in subsequent calls
|
||||
// to NewObjectId function.
|
||||
var machineID = readMachineID()
|
||||
|
||||
// pid stores the current process id
|
||||
var pid = os.Getpid()
|
||||
|
||||
// dec is the decoding map for base32 encoding
|
||||
var dec [256]byte
|
||||
|
||||
func init() {
|
||||
for i := 0; i < len(dec); i++ {
|
||||
dec[i] = 0xFF
|
||||
}
|
||||
for i := 0; i < len(encoding); i++ {
|
||||
dec[encoding[i]] = byte(i)
|
||||
}
|
||||
}
|
||||
|
||||
// readMachineId generates machine id and puts it into the machineId global
|
||||
// variable. If this function fails to get the hostname, it will cause
|
||||
// a runtime error.
|
||||
func readMachineID() []byte {
|
||||
id := make([]byte, 3)
|
||||
if hostname, err := os.Hostname(); err == nil {
|
||||
hw := md5.New()
|
||||
hw.Write([]byte(hostname))
|
||||
copy(id, hw.Sum(nil))
|
||||
} else {
|
||||
// Fallback to rand number if machine id can't be gathered
|
||||
if _, randErr := rand.Reader.Read(id); randErr != nil {
|
||||
panic(fmt.Errorf("xid: cannot get hostname nor generate a random number: %v; %v", err, randErr))
|
||||
}
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// randInt generates a random uint32
|
||||
func randInt() uint32 {
|
||||
b := make([]byte, 3)
|
||||
if _, err := rand.Reader.Read(b); err != nil {
|
||||
panic(fmt.Errorf("xid: cannot generate random number: %v;", err))
|
||||
}
|
||||
return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])
|
||||
}
|
||||
|
||||
// New generates a globaly unique ID
|
||||
func New() ID {
|
||||
var id ID
|
||||
// Timestamp, 4 bytes, big endian
|
||||
binary.BigEndian.PutUint32(id[:], uint32(time.Now().Unix()))
|
||||
// Machine, first 3 bytes of md5(hostname)
|
||||
id[4] = machineID[0]
|
||||
id[5] = machineID[1]
|
||||
id[6] = machineID[2]
|
||||
// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
|
||||
id[7] = byte(pid >> 8)
|
||||
id[8] = byte(pid)
|
||||
// Increment, 3 bytes, big endian
|
||||
i := atomic.AddUint32(&objectIDCounter, 1)
|
||||
id[9] = byte(i >> 16)
|
||||
id[10] = byte(i >> 8)
|
||||
id[11] = byte(i)
|
||||
return id
|
||||
}
|
||||
|
||||
// FromString reads an ID from its string representation
|
||||
func FromString(id string) (ID, error) {
|
||||
i := &ID{}
|
||||
err := i.UnmarshalText([]byte(id))
|
||||
return *i, err
|
||||
}
|
||||
|
||||
// String returns a base32 hex lowercased with no padding representation of the id (char set is 0-9, a-v).
|
||||
func (id ID) String() string {
|
||||
text := make([]byte, encodedLen)
|
||||
encode(text, id[:])
|
||||
return string(text)
|
||||
}
|
||||
|
||||
// MarshalText implements encoding/text TextMarshaler interface
|
||||
func (id ID) MarshalText() ([]byte, error) {
|
||||
text := make([]byte, encodedLen)
|
||||
encode(text, id[:])
|
||||
return text, nil
|
||||
}
|
||||
|
||||
// encode by unrolling the stdlib base32 algorithm + removing all safe checks
|
||||
func encode(dst, id []byte) {
|
||||
dst[0] = encoding[id[0]>>3]
|
||||
dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F]
|
||||
dst[2] = encoding[(id[1]>>1)&0x1F]
|
||||
dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F]
|
||||
dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F]
|
||||
dst[5] = encoding[(id[3]>>2)&0x1F]
|
||||
dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F]
|
||||
dst[7] = encoding[id[4]&0x1F]
|
||||
dst[8] = encoding[id[5]>>3]
|
||||
dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F]
|
||||
dst[10] = encoding[(id[6]>>1)&0x1F]
|
||||
dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F]
|
||||
dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F]
|
||||
dst[13] = encoding[(id[8]>>2)&0x1F]
|
||||
dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F]
|
||||
dst[15] = encoding[id[9]&0x1F]
|
||||
dst[16] = encoding[id[10]>>3]
|
||||
dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F]
|
||||
dst[18] = encoding[(id[11]>>1)&0x1F]
|
||||
dst[19] = encoding[(id[11]<<4)&0x1F]
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding/text TextUnmarshaler interface
|
||||
func (id *ID) UnmarshalText(text []byte) error {
|
||||
if len(text) != encodedLen {
|
||||
return ErrInvalidID
|
||||
}
|
||||
for _, c := range text {
|
||||
if dec[c] == 0xFF {
|
||||
return ErrInvalidID
|
||||
}
|
||||
}
|
||||
decode(id, text)
|
||||
return nil
|
||||
}
|
||||
|
||||
// decode by unrolling the stdlib base32 algorithm + removing all safe checks
|
||||
func decode(id *ID, src []byte) {
|
||||
id[0] = dec[src[0]]<<3 | dec[src[1]]>>2
|
||||
id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4
|
||||
id[2] = dec[src[3]]<<4 | dec[src[4]]>>1
|
||||
id[3] = dec[src[4]]<<7 | dec[src[5]]<<2 | dec[src[6]]>>3
|
||||
id[4] = dec[src[6]]<<5 | dec[src[7]]
|
||||
id[5] = dec[src[8]]<<3 | dec[src[9]]>>2
|
||||
id[6] = dec[src[9]]<<6 | dec[src[10]]<<1 | dec[src[11]]>>4
|
||||
id[7] = dec[src[11]]<<4 | dec[src[12]]>>1
|
||||
id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3
|
||||
id[9] = dec[src[14]]<<5 | dec[src[15]]
|
||||
id[10] = dec[src[16]]<<3 | dec[src[17]]>>2
|
||||
id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4
|
||||
}
|
||||
|
||||
// Time returns the timestamp part of the id.
|
||||
// It's a runtime error to call this method with an invalid id.
|
||||
func (id ID) Time() time.Time {
|
||||
// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
|
||||
secs := int64(binary.BigEndian.Uint32(id[0:4]))
|
||||
return time.Unix(secs, 0)
|
||||
}
|
||||
|
||||
// Machine returns the 3-byte machine id part of the id.
|
||||
// It's a runtime error to call this method with an invalid id.
|
||||
func (id ID) Machine() []byte {
|
||||
return id[4:7]
|
||||
}
|
||||
|
||||
// Pid returns the process id part of the id.
|
||||
// It's a runtime error to call this method with an invalid id.
|
||||
func (id ID) Pid() uint16 {
|
||||
return binary.BigEndian.Uint16(id[7:9])
|
||||
}
|
||||
|
||||
// Counter returns the incrementing value part of the id.
|
||||
// It's a runtime error to call this method with an invalid id.
|
||||
func (id ID) Counter() int32 {
|
||||
b := id[9:12]
|
||||
// Counter is stored as big-endian 3-byte value
|
||||
return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
|
||||
}
|
||||
|
||||
// Value implements the driver.Valuer interface.
|
||||
func (id ID) Value() (driver.Value, error) {
|
||||
b, err := id.MarshalText()
|
||||
return string(b), err
|
||||
}
|
||||
|
||||
// Scan implements the sql.Scanner interface.
|
||||
func (id *ID) Scan(value interface{}) (err error) {
|
||||
switch val := value.(type) {
|
||||
case string:
|
||||
return id.UnmarshalText([]byte(val))
|
||||
case []byte:
|
||||
return id.UnmarshalText(val)
|
||||
default:
|
||||
return fmt.Errorf("xid: scanning unsupported type: %T", value)
|
||||
}
|
||||
}
|
||||
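A short usage sketch of the accessors above. It assumes the surrounding package is the rs/xid-style ID type and that it exposes a New() constructor, which is not part of this hunk, so treat the import path and New() as illustrative:

```go
package main

import (
    "fmt"

    "github.com/rs/xid" // assumed import path; New() is not shown in this diff
)

func main() {
    id := xid.New()

    // 20-character base32-hex string produced by String().
    fmt.Println(id.String())

    // The pieces packed into the 12 bytes: timestamp, machine id, pid, counter.
    fmt.Println(id.Time(), id.Machine(), id.Pid(), id.Counter())

    // Round-trip through the text form, as MarshalText/UnmarshalText do.
    var parsed xid.ID
    if err := parsed.UnmarshalText([]byte(id.String())); err != nil {
        fmt.Println("bad id:", err)
        return
    }
    fmt.Println(parsed == id) // true
}
```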
173
utils/sni/sni.go
Normal file
@ -0,0 +1,173 @@
package sni

import (
    "bufio"
    "bytes"
    "errors"
    "io"
    "net"
)

func ServerNameFromBytes(data []byte) (sn string, err error) {
    reader := bytes.NewReader(data)
    bufferedReader := bufio.NewReader(reader)
    c := bufferedConn{bufferedReader, nil, nil}
    sn, _, err = ServerNameFromConn(c)
    return
}

type bufferedConn struct {
    r    *bufio.Reader
    rout io.Reader
    net.Conn
}

func newBufferedConn(c net.Conn) bufferedConn {
    return bufferedConn{bufio.NewReader(c), nil, c}
}

func (b bufferedConn) Peek(n int) ([]byte, error) {
    return b.r.Peek(n)
}

func (b bufferedConn) Read(p []byte) (int, error) {
    if b.rout != nil {
        return b.rout.Read(p)
    }
    return b.r.Read(p)
}

var malformedError = errors.New("malformed client hello")

func getHello(b []byte) (string, error) {
    rest := b[5:]

    if len(rest) == 0 {
        return "", malformedError
    }

    current := 0
    handshakeType := rest[0]
    current += 1
    if handshakeType != 0x1 {
        return "", errors.New("Not a ClientHello")
    }

    // Skip over another length
    current += 3
    // Skip over protocolversion
    current += 2
    // Skip over random number
    current += 4 + 28

    if current > len(rest) {
        return "", malformedError
    }

    // Skip over session ID
    sessionIDLength := int(rest[current])
    current += 1
    current += sessionIDLength

    if current+1 > len(rest) {
        return "", malformedError
    }

    cipherSuiteLength := (int(rest[current]) << 8) + int(rest[current+1])
    current += 2
    current += cipherSuiteLength

    if current > len(rest) {
        return "", malformedError
    }
    compressionMethodLength := int(rest[current])
    current += 1
    current += compressionMethodLength

    if current > len(rest) {
        return "", errors.New("no extensions")
    }

    current += 2

    hostname := ""
    for current+4 < len(rest) && hostname == "" {
        extensionType := (int(rest[current]) << 8) + int(rest[current+1])
        current += 2

        extensionDataLength := (int(rest[current]) << 8) + int(rest[current+1])
        current += 2

        if extensionType == 0 {

            // Skip over number of names as we're assuming there's just one
            current += 2
            if current > len(rest) {
                return "", malformedError
            }

            nameType := rest[current]
            current += 1
            if nameType != 0 {
                return "", errors.New("Not a hostname")
            }
            if current+1 > len(rest) {
                return "", malformedError
            }
            nameLen := (int(rest[current]) << 8) + int(rest[current+1])
            current += 2
            if current+nameLen > len(rest) {
                return "", malformedError
            }
            hostname = string(rest[current : current+nameLen])
        }

        current += extensionDataLength
    }
    if hostname == "" {
        return "", errors.New("No hostname")
    }
    return hostname, nil
}

func getHelloBytes(c bufferedConn) ([]byte, error) {
    b, err := c.Peek(5)
    if err != nil {
        return []byte{}, err
    }

    if b[0] != 0x16 {
        return []byte{}, errors.New("not TLS")
    }

    restLengthBytes := b[3:]
    restLength := (int(restLengthBytes[0]) << 8) + int(restLengthBytes[1])

    return c.Peek(5 + restLength)
}

func getServername(c bufferedConn) (string, []byte, error) {
    all, err := getHelloBytes(c)
    if err != nil {
        return "", nil, err
    }
    name, err := getHello(all)
    if err != nil {
        return "", nil, err
    }
    return name, all, err
}

// Uses SNI to get the name of the server from the connection. Returns the ServerName and a buffered connection that will not have been read off of.
func ServerNameFromConn(c net.Conn) (string, net.Conn, error) {
    bufconn := newBufferedConn(c)
    sn, helloBytes, err := getServername(bufconn)
    if err != nil {
        return "", nil, err
    }
    bufconn.rout = io.MultiReader(bytes.NewBuffer(helloBytes), c)
    return sn, bufconn, nil
}
|
||||
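A minimal sketch of how ServerNameFromConn can be used to peek the SNI of an accepted TLS connection and then forward the untouched byte stream; the listen address and the port-443 upstream are placeholders:

```go
package main

import (
    "io"
    "log"
    "net"

    "proxy/utils/sni"
)

func main() {
    l, err := net.Listen("tcp", ":8443") // placeholder listen address
    if err != nil {
        log.Fatal(err)
    }
    for {
        c, err := l.Accept()
        if err != nil {
            continue
        }
        go func(c net.Conn) {
            // Peek the ClientHello; the returned conn still delivers those bytes on Read.
            name, buffered, err := sni.ServerNameFromConn(c)
            if err != nil {
                c.Close()
                return
            }
            log.Printf("SNI: %s", name)
            // Forward the full stream (ClientHello included) to the real server.
            upstream, err := net.Dial("tcp", net.JoinHostPort(name, "443"))
            if err != nil {
                buffered.Close()
                return
            }
            go io.Copy(upstream, buffered)
            io.Copy(buffered, upstream)
            upstream.Close()
            buffered.Close()
        }(c)
    }
}
```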
165
utils/structs.go
@ -11,6 +11,7 @@ import (
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"proxy/utils/sni"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@ -74,21 +75,19 @@ func (c *Checker) loadMap(f string) (dataMap ConcurrentMap) {
|
||||
}
|
||||
func (c *Checker) start() {
|
||||
go func() {
|
||||
//log.Printf("checker started")
|
||||
for {
|
||||
//log.Printf("checker did")
|
||||
for _, v := range c.data.Items() {
|
||||
go func(item CheckerItem) {
|
||||
if c.isNeedCheck(item) {
|
||||
//log.Printf("check %s", item.Domain)
|
||||
//log.Printf("check %s", item.Host)
|
||||
var conn net.Conn
|
||||
var err error
|
||||
if item.IsHTTPS {
|
||||
conn, err = ConnectHost(item.Host, c.timeout)
|
||||
if err == nil {
|
||||
conn.SetDeadline(time.Now().Add(time.Millisecond))
|
||||
conn.Close()
|
||||
}
|
||||
} else {
|
||||
err = HTTPGet(item.URL, c.timeout)
|
||||
conn, err = ConnectHost(item.Host, c.timeout)
|
||||
if err == nil {
|
||||
conn.SetDeadline(time.Now().Add(time.Millisecond))
|
||||
conn.Close()
|
||||
}
|
||||
if err != nil {
|
||||
item.FailCount = item.FailCount + 1
|
||||
@ -155,28 +154,23 @@ func (c *Checker) domainIsInMap(address string, blockedMap bool) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (c *Checker) Add(address string, isHTTPS bool, method, URL string, data []byte) {
|
||||
func (c *Checker) Add(address string) {
|
||||
if c.domainIsInMap(address, false) || c.domainIsInMap(address, true) {
|
||||
return
|
||||
}
|
||||
if !isHTTPS && strings.ToLower(method) != "get" {
|
||||
return
|
||||
}
|
||||
var item CheckerItem
|
||||
u := strings.Split(address, ":")
|
||||
item = CheckerItem{
|
||||
URL: URL,
|
||||
Domain: u[0],
|
||||
Host: address,
|
||||
Data: data,
|
||||
IsHTTPS: isHTTPS,
|
||||
Method: method,
|
||||
Host: address,
|
||||
}
|
||||
c.data.SetIfAbsent(item.Host, item)
|
||||
}
|
||||
|
||||
type BasicAuth struct {
|
||||
data ConcurrentMap
|
||||
data ConcurrentMap
|
||||
authURL string
|
||||
authOkCode int
|
||||
authTimeout int
|
||||
authRetry int
|
||||
}
|
||||
|
||||
func NewBasicAuth() BasicAuth {
|
||||
@ -184,6 +178,12 @@ func NewBasicAuth() BasicAuth {
|
||||
data: NewConcurrentMap(),
|
||||
}
|
||||
}
|
||||
func (ba *BasicAuth) SetAuthURL(URL string, code, timeout, retry int) {
|
||||
ba.authURL = URL
|
||||
ba.authOkCode = code
|
||||
ba.authTimeout = timeout
|
||||
ba.authRetry = retry
|
||||
}
|
||||
func (ba *BasicAuth) AddFromFile(file string) (n int, err error) {
|
||||
_content, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
@ -213,21 +213,69 @@ func (ba *BasicAuth) Add(userpassArr []string) (n int) {
|
||||
}
|
||||
return
|
||||
}
|
||||
func (ba *BasicAuth) CheckUserPass(user, pass string) (ok bool) {
|
||||
if p, _ok := ba.data.Get(user); _ok {
|
||||
return p.(string) == pass
|
||||
}
|
||||
return
|
||||
func (ba *BasicAuth) CheckUserPass(user, pass, ip, target string) (ok bool) {
|
||||
|
||||
return ba.Check(user+":"+pass, ip, target)
|
||||
}
|
||||
func (ba *BasicAuth) Check(userpass string) (ok bool) {
|
||||
func (ba *BasicAuth) Check(userpass string, ip, target string) (ok bool) {
|
||||
u := strings.Split(strings.Trim(userpass, " "), ":")
|
||||
if len(u) == 2 {
|
||||
if p, _ok := ba.data.Get(u[0]); _ok {
|
||||
return p.(string) == u[1]
|
||||
}
|
||||
if ba.authURL != "" {
|
||||
err := ba.checkFromURL(userpass, ip, target)
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
log.Printf("%s", err)
|
||||
}
|
||||
return false
|
||||
}
|
||||
return
|
||||
}
|
||||
func (ba *BasicAuth) checkFromURL(userpass, ip, target string) (err error) {
|
||||
u := strings.Split(strings.Trim(userpass, " "), ":")
|
||||
if len(u) != 2 {
|
||||
return
|
||||
}
|
||||
URL := ba.authURL
|
||||
if strings.Contains(URL, "?") {
|
||||
URL += "&"
|
||||
} else {
|
||||
URL += "?"
|
||||
}
|
||||
URL += fmt.Sprintf("user=%s&pass=%s&ip=%s&target=%s", u[0], u[1], ip, target)
|
||||
var code int
|
||||
var tryCount = 0
|
||||
var body []byte
|
||||
for tryCount <= ba.authRetry {
|
||||
body, code, err = HttpGet(URL, ba.authTimeout)
|
||||
if err == nil && code == ba.authOkCode {
|
||||
break
|
||||
} else if err != nil {
|
||||
err = fmt.Errorf("auth fail from url %s,resonse err:%s , %s", URL, err, ip)
|
||||
} else {
|
||||
if len(body) > 0 {
|
||||
if len(body) > 100 {
body = body[:100]
}
err = fmt.Errorf("%s", body)
|
||||
} else {
|
||||
err = fmt.Errorf("token error")
|
||||
}
|
||||
err = fmt.Errorf("auth fail from url %s,resonse code: %d, except: %d , %s , %s", URL, code, ba.authOkCode, ip, string(body))
|
||||
}
|
||||
if err != nil && tryCount < ba.authRetry {
|
||||
log.Print(err)
|
||||
time.Sleep(time.Second * 2)
|
||||
}
|
||||
tryCount++
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
//log.Printf("auth success from auth url, %s", ip)
|
||||
return
|
||||
}
|
||||
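For reference, checkFromURL above issues a GET with user, pass, ip and target query parameters and treats a response whose status code equals authOkCode as success; anything else (plus up to 100 bytes of body) becomes the failure reason. A minimal sketch of an external auth endpoint it could be pointed at; the port, path and credential rule are placeholders:

```go
package main

import (
    "log"
    "net/http"
)

func main() {
    http.HandleFunc("/auth", func(w http.ResponseWriter, r *http.Request) {
        user := r.URL.Query().Get("user")
        pass := r.URL.Query().Get("pass")
        ip := r.URL.Query().Get("ip")
        target := r.URL.Query().Get("target")
        log.Printf("auth request: user=%s ip=%s target=%s", user, ip, target)

        // Placeholder rule: accept one hard-coded credential pair.
        if user == "demo" && pass == "secret" {
            w.WriteHeader(http.StatusNoContent) // 204, i.e. whatever code SetAuthURL was given as "ok"
            return
        }
        // Any other status (and optional body) is reported by the proxy as the failure reason.
        http.Error(w, "bad credentials", http.StatusForbidden)
    })
    log.Fatal(http.ListenAndServe(":8000", nil))
}
```

The proxy side would then point SetAuthURL at this URL with 204 as the ok code.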
|
||||
func (ba *BasicAuth) Total() (n int) {
|
||||
n = ba.data.Count()
|
||||
return
|
||||
@ -259,13 +307,23 @@ func NewHTTPRequest(inConn *net.Conn, bufSize int, isBasicAuth bool, basicAuth *
|
||||
return
|
||||
}
|
||||
req.HeadBuf = buf[:len]
|
||||
index := bytes.IndexByte(req.HeadBuf, '\n')
|
||||
if index == -1 {
|
||||
err = fmt.Errorf("http decoder data line err:%s", SubStr(string(req.HeadBuf), 0, 50))
|
||||
CloseConn(inConn)
|
||||
return
|
||||
//fmt.Println(string(req.HeadBuf))
|
||||
//try sni
|
||||
serverName, err0 := sni.ServerNameFromBytes(req.HeadBuf)
|
||||
if err0 == nil {
|
||||
//sni success
|
||||
req.Method = "SNI"
|
||||
req.hostOrURL = "https://" + serverName + ":443"
|
||||
} else {
|
||||
//sni fail , try http
|
||||
index := bytes.IndexByte(req.HeadBuf, '\n')
|
||||
if index == -1 {
|
||||
err = fmt.Errorf("http decoder data line err:%s", SubStr(string(req.HeadBuf), 0, 50))
|
||||
CloseConn(inConn)
|
||||
return
|
||||
}
|
||||
fmt.Sscanf(string(req.HeadBuf[:index]), "%s%s", &req.Method, &req.hostOrURL)
|
||||
}
|
||||
fmt.Sscanf(string(req.HeadBuf[:index]), "%s%s", &req.Method, &req.hostOrURL)
|
||||
if req.Method == "" || req.hostOrURL == "" {
|
||||
err = fmt.Errorf("http decoder data err:%s", SubStr(string(req.HeadBuf), 0, 50))
|
||||
CloseConn(inConn)
|
||||
@ -292,13 +350,23 @@ func (req *HTTPRequest) HTTP() (err error) {
|
||||
}
|
||||
req.URL, err = req.getHTTPURL()
|
||||
if err == nil {
|
||||
u, _ := url.Parse(req.URL)
|
||||
var u *url.URL
|
||||
u, err = url.Parse(req.URL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
req.Host = u.Host
|
||||
req.addPortIfNot()
|
||||
}
|
||||
return
|
||||
}
|
||||
func (req *HTTPRequest) HTTPS() (err error) {
|
||||
if req.isBasicAuth {
|
||||
err = req.BasicAuth()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
req.Host = req.hostOrURL
|
||||
req.addPortIfNot()
|
||||
//_, err = fmt.Fprint(*req.conn, "HTTP/1.1 200 Connection established\r\n\r\n")
|
||||
@ -314,13 +382,23 @@ func (req *HTTPRequest) IsHTTPS() bool {
|
||||
|
||||
func (req *HTTPRequest) BasicAuth() (err error) {
|
||||
|
||||
//log.Printf("request :%s", string(b[:n]))
|
||||
//log.Printf("request :%s", string(b[:n]))authorization
|
||||
isProxyAuthorization := false
|
||||
authorization, err := req.getHeader("Authorization")
|
||||
if err != nil {
|
||||
fmt.Fprint((*req.conn), "HTTP/1.1 401 Unauthorized\r\nWWW-Authenticate: Basic realm=\"\"\r\n\r\nUnauthorized")
|
||||
CloseConn(req.conn)
|
||||
return
|
||||
}
|
||||
if authorization == "" {
|
||||
authorization, err = req.getHeader("Proxy-Authorization")
|
||||
if err != nil {
|
||||
fmt.Fprint((*req.conn), "HTTP/1.1 407 Unauthorized\r\nWWW-Authenticate: Basic realm=\"\"\r\n\r\nUnauthorized")
|
||||
CloseConn(req.conn)
|
||||
return
|
||||
}
|
||||
isProxyAuthorization = true
|
||||
}
|
||||
//log.Printf("Authorization:%s", authorization)
|
||||
basic := strings.Fields(authorization)
|
||||
if len(basic) != 2 {
|
||||
@ -334,10 +412,21 @@ func (req *HTTPRequest) BasicAuth() (err error) {
|
||||
CloseConn(req.conn)
|
||||
return
|
||||
}
|
||||
authOk := (*req.basicAuth).Check(string(user))
|
||||
addr := strings.Split((*req.conn).RemoteAddr().String(), ":")
|
||||
URL := ""
|
||||
if req.IsHTTPS() {
|
||||
URL = "https://" + req.Host
|
||||
} else {
|
||||
URL, _ = req.getHTTPURL()
|
||||
}
|
||||
authOk := (*req.basicAuth).Check(string(user), addr[0], URL)
|
||||
//log.Printf("auth %s,%v", string(user), authOk)
|
||||
if !authOk {
|
||||
fmt.Fprint((*req.conn), "HTTP/1.1 401 Unauthorized\r\n\r\nUnauthorized")
|
||||
code := "401"
|
||||
if isProxyAuthorization {
|
||||
code = "407"
|
||||
}
|
||||
fmt.Fprintf((*req.conn), "HTTP/1.1 %s Unauthorized\r\n\r\nUnauthorized", code)
|
||||
CloseConn(req.conn)
|
||||
err = fmt.Errorf("basic auth fail")
|
||||
return
|
||||
@ -358,6 +447,7 @@ func (req *HTTPRequest) getHTTPURL() (URL string, err error) {
|
||||
func (req *HTTPRequest) getHeader(key string) (val string, err error) {
|
||||
key = strings.ToUpper(key)
|
||||
lines := strings.Split(string(req.HeadBuf), "\r\n")
|
||||
//log.Println(lines)
|
||||
for _, line := range lines {
|
||||
line := strings.SplitN(strings.Trim(line, "\r\n "), ":", 2)
|
||||
if len(line) == 2 {
|
||||
@ -369,7 +459,6 @@ func (req *HTTPRequest) getHeader(key string) (val string, err error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
err = fmt.Errorf("can not find HOST header")
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
27
vendor/github.com/alecthomas/template/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
25
vendor/github.com/alecthomas/template/README.md
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
# Go's `text/template` package with newline elision
|
||||
|
||||
This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.
|
||||
|
||||
eg.
|
||||
|
||||
```
|
||||
{{if true}}\
|
||||
hello
|
||||
{{end}}\
|
||||
```
|
||||
|
||||
Will result in:
|
||||
|
||||
```
|
||||
hello\n
|
||||
```
|
||||
|
||||
Rather than:
|
||||
|
||||
```
|
||||
\n
|
||||
hello\n
|
||||
\n
|
||||
```
|
||||
406
vendor/github.com/alecthomas/template/doc.go
generated
vendored
Normal file
@ -0,0 +1,406 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package template implements data-driven templates for generating textual output.
|
||||
|
||||
To generate HTML output, see package html/template, which has the same interface
|
||||
as this package but automatically secures HTML output against certain attacks.
|
||||
|
||||
Templates are executed by applying them to a data structure. Annotations in the
|
||||
template refer to elements of the data structure (typically a field of a struct
|
||||
or a key in a map) to control execution and derive values to be displayed.
|
||||
Execution of the template walks the structure and sets the cursor, represented
|
||||
by a period '.' and called "dot", to the value at the current location in the
|
||||
structure as execution proceeds.
|
||||
|
||||
The input text for a template is UTF-8-encoded text in any format.
|
||||
"Actions"--data evaluations or control structures--are delimited by
|
||||
"{{" and "}}"; all text outside actions is copied to the output unchanged.
|
||||
Actions may not span newlines, although comments can.
|
||||
|
||||
Once parsed, a template may be executed safely in parallel.
|
||||
|
||||
Here is a trivial example that prints "17 items are made of wool".
|
||||
|
||||
type Inventory struct {
|
||||
Material string
|
||||
Count uint
|
||||
}
|
||||
sweaters := Inventory{"wool", 17}
|
||||
tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
|
||||
if err != nil { panic(err) }
|
||||
err = tmpl.Execute(os.Stdout, sweaters)
|
||||
if err != nil { panic(err) }
|
||||
|
||||
More intricate examples appear below.
|
||||
|
||||
Actions
|
||||
|
||||
Here is the list of actions. "Arguments" and "pipelines" are evaluations of
|
||||
data, defined in detail below.
|
||||
|
||||
*/
|
||||
// {{/* a comment */}}
|
||||
// A comment; discarded. May contain newlines.
|
||||
// Comments do not nest and must start and end at the
|
||||
// delimiters, as shown here.
|
||||
/*
|
||||
|
||||
{{pipeline}}
|
||||
The default textual representation of the value of the pipeline
|
||||
is copied to the output.
|
||||
|
||||
{{if pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, T1 is executed. The empty values are false, 0, any
|
||||
nil pointer or interface value, and any array, slice, map, or
|
||||
string of length zero.
|
||||
Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, T0 is executed;
|
||||
otherwise, T1 is executed. Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
|
||||
To simplify the appearance of if-else chains, the else action
|
||||
of an if may include another if directly; the effect is exactly
|
||||
the same as writing
|
||||
{{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
|
||||
|
||||
{{range pipeline}} T1 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, nothing is output;
|
||||
otherwise, dot is set to the successive elements of the array,
|
||||
slice, or map and T1 is executed. If the value is a map and the
|
||||
keys are of basic type with a defined order ("comparable"), the
|
||||
elements will be visited in sorted key order.
|
||||
|
||||
{{range pipeline}} T1 {{else}} T0 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, dot is unaffected and
|
||||
T0 is executed; otherwise, dot is set to the successive elements
|
||||
of the array, slice, or map and T1 is executed.
|
||||
|
||||
{{template "name"}}
|
||||
The template with the specified name is executed with nil data.
|
||||
|
||||
{{template "name" pipeline}}
|
||||
The template with the specified name is executed with dot set
|
||||
to the value of the pipeline.
|
||||
|
||||
{{with pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, dot is set to the value of the pipeline and T1 is
|
||||
executed.
|
||||
|
||||
{{with pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, dot is unaffected and T0
|
||||
is executed; otherwise, dot is set to the value of the pipeline
|
||||
and T1 is executed.
|
||||
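A small end-to-end sketch tying the actions above together (the data and template text are made up; the standard text/template package is used here, and this fork behaves the same for these features):

```go
package main

import (
    "os"
    "text/template"
)

func main() {
    const src = `{{if .Items}}{{range .Items}}- {{.}}
{{end}}{{else}}nothing to list
{{end}}`
    t := template.Must(template.New("list").Parse(src))
    _ = t.Execute(os.Stdout, map[string]interface{}{
        "Items": []string{"wool", "cotton"},
    })
    // Output:
    // - wool
    // - cotton
}
```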
|
||||
Arguments
|
||||
|
||||
An argument is a simple value, denoted by one of the following.
|
||||
|
||||
- A boolean, string, character, integer, floating-point, imaginary
|
||||
or complex constant in Go syntax. These behave like Go's untyped
|
||||
constants, although raw strings may not span newlines.
|
||||
- The keyword nil, representing an untyped Go nil.
|
||||
- The character '.' (period):
|
||||
.
|
||||
The result is the value of dot.
|
||||
- A variable name, which is a (possibly empty) alphanumeric string
|
||||
preceded by a dollar sign, such as
|
||||
$piOver2
|
||||
or
|
||||
$
|
||||
The result is the value of the variable.
|
||||
Variables are described below.
|
||||
- The name of a field of the data, which must be a struct, preceded
|
||||
by a period, such as
|
||||
.Field
|
||||
The result is the value of the field. Field invocations may be
|
||||
chained:
|
||||
.Field1.Field2
|
||||
Fields can also be evaluated on variables, including chaining:
|
||||
$x.Field1.Field2
|
||||
- The name of a key of the data, which must be a map, preceded
|
||||
by a period, such as
|
||||
.Key
|
||||
The result is the map element value indexed by the key.
|
||||
Key invocations may be chained and combined with fields to any
|
||||
depth:
|
||||
.Field1.Key1.Field2.Key2
|
||||
Although the key must be an alphanumeric identifier, unlike with
|
||||
field names they do not need to start with an upper case letter.
|
||||
Keys can also be evaluated on variables, including chaining:
|
||||
$x.key1.key2
|
||||
- The name of a niladic method of the data, preceded by a period,
|
||||
such as
|
||||
.Method
|
||||
The result is the value of invoking the method with dot as the
|
||||
receiver, dot.Method(). Such a method must have one return value (of
|
||||
any type) or two return values, the second of which is an error.
|
||||
If it has two and the returned error is non-nil, execution terminates
|
||||
and an error is returned to the caller as the value of Execute.
|
||||
Method invocations may be chained and combined with fields and keys
|
||||
to any depth:
|
||||
.Field1.Key1.Method1.Field2.Key2.Method2
|
||||
Methods can also be evaluated on variables, including chaining:
|
||||
$x.Method1.Field
|
||||
- The name of a niladic function, such as
|
||||
fun
|
||||
The result is the value of invoking the function, fun(). The return
|
||||
types and values behave as in methods. Functions and function
|
||||
names are described below.
|
||||
- A parenthesized instance of one of the above, for grouping. The result
|
||||
may be accessed by a field or map key invocation.
|
||||
print (.F1 arg1) (.F2 arg2)
|
||||
(.StructValuedMethod "arg").Field
|
||||
|
||||
Arguments may evaluate to any type; if they are pointers the implementation
|
||||
automatically indirects to the base type when required.
|
||||
If an evaluation yields a function value, such as a function-valued
|
||||
field of a struct, the function is not invoked automatically, but it
|
||||
can be used as a truth value for an if action and the like. To invoke
|
||||
it, use the call function, defined below.
|
||||
|
||||
A pipeline is a possibly chained sequence of "commands". A command is a simple
|
||||
value (argument) or a function or method call, possibly with multiple arguments:
|
||||
|
||||
Argument
|
||||
The result is the value of evaluating the argument.
|
||||
.Method [Argument...]
|
||||
The method can be alone or the last element of a chain but,
|
||||
unlike methods in the middle of a chain, it can take arguments.
|
||||
The result is the value of calling the method with the
|
||||
arguments:
|
||||
dot.Method(Argument1, etc.)
|
||||
functionName [Argument...]
|
||||
The result is the value of calling the function associated
|
||||
with the name:
|
||||
function(Argument1, etc.)
|
||||
Functions and function names are described below.
|
||||
|
||||
Pipelines
|
||||
|
||||
A pipeline may be "chained" by separating a sequence of commands with pipeline
|
||||
characters '|'. In a chained pipeline, the result of each command is
|
||||
passed as the last argument of the following command. The output of the final
|
||||
command in the pipeline is the value of the pipeline.
|
||||
|
||||
The output of a command will be either one value or two values, the second of
|
||||
which has type error. If that second value is present and evaluates to
|
||||
non-nil, execution terminates and the error is returned to the caller of
|
||||
Execute.
|
||||
|
||||
Variables
|
||||
|
||||
A pipeline inside an action may initialize a variable to capture the result.
|
||||
The initialization has syntax
|
||||
|
||||
$variable := pipeline
|
||||
|
||||
where $variable is the name of the variable. An action that declares a
|
||||
variable produces no output.
|
||||
|
||||
If a "range" action initializes a variable, the variable is set to the
|
||||
successive elements of the iteration. Also, a "range" may declare two
|
||||
variables, separated by a comma:
|
||||
|
||||
range $index, $element := pipeline
|
||||
|
||||
in which case $index and $element are set to the successive values of the
|
||||
array/slice index or map key and element, respectively. Note that if there is
|
||||
only one variable, it is assigned the element; this is opposite to the
|
||||
convention in Go range clauses.
|
||||
|
||||
A variable's scope extends to the "end" action of the control structure ("if",
|
||||
"with", or "range") in which it is declared, or to the end of the template if
|
||||
there is no such control structure. A template invocation does not inherit
|
||||
variables from the point of its invocation.
|
||||
|
||||
When execution begins, $ is set to the data argument passed to Execute, that is,
|
||||
to the starting value of dot.
|
||||
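A sketch of the two-variable range form just described (again using the standard text/template, which behaves identically here):

```go
package main

import (
    "os"
    "text/template"
)

func main() {
    const src = `{{range $i, $name := .}}{{$i}}: {{$name}}
{{end}}`
    t := template.Must(template.New("vars").Parse(src))
    _ = t.Execute(os.Stdout, []string{"alpha", "beta"})
    // Output:
    // 0: alpha
    // 1: beta
}
```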
|
||||
Examples
|
||||
|
||||
Here are some example one-line templates demonstrating pipelines and variables.
|
||||
All produce the quoted word "output":
|
||||
|
||||
{{"\"output\""}}
|
||||
A string constant.
|
||||
{{`"output"`}}
|
||||
A raw string constant.
|
||||
{{printf "%q" "output"}}
|
||||
A function call.
|
||||
{{"output" | printf "%q"}}
|
||||
A function call whose final argument comes from the previous
|
||||
command.
|
||||
{{printf "%q" (print "out" "put")}}
|
||||
A parenthesized argument.
|
||||
{{"put" | printf "%s%s" "out" | printf "%q"}}
|
||||
A more elaborate call.
|
||||
{{"output" | printf "%s" | printf "%q"}}
|
||||
A longer chain.
|
||||
{{with "output"}}{{printf "%q" .}}{{end}}
|
||||
A with action using dot.
|
||||
{{with $x := "output" | printf "%q"}}{{$x}}{{end}}
|
||||
A with action that creates and uses a variable.
|
||||
{{with $x := "output"}}{{printf "%q" $x}}{{end}}
|
||||
A with action that uses the variable in another action.
|
||||
{{with $x := "output"}}{{$x | printf "%q"}}{{end}}
|
||||
The same, but pipelined.
|
||||
|
||||
Functions
|
||||
|
||||
During execution functions are found in two function maps: first in the
|
||||
template, then in the global function map. By default, no functions are defined
|
||||
in the template but the Funcs method can be used to add them.
|
||||
|
||||
Predefined global functions are named as follows.
|
||||
|
||||
and
|
||||
Returns the boolean AND of its arguments by returning the
|
||||
first empty argument or the last argument, that is,
|
||||
"and x y" behaves as "if x then y else x". All the
|
||||
arguments are evaluated.
|
||||
call
|
||||
Returns the result of calling the first argument, which
|
||||
must be a function, with the remaining arguments as parameters.
|
||||
Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
|
||||
Y is a func-valued field, map entry, or the like.
|
||||
The first argument must be the result of an evaluation
|
||||
that yields a value of function type (as distinct from
|
||||
a predefined function such as print). The function must
|
||||
return either one or two result values, the second of which
|
||||
is of type error. If the arguments don't match the function
|
||||
or the returned error value is non-nil, execution stops.
|
||||
html
|
||||
Returns the escaped HTML equivalent of the textual
|
||||
representation of its arguments.
|
||||
index
|
||||
Returns the result of indexing its first argument by the
|
||||
following arguments. Thus "index x 1 2 3" is, in Go syntax,
|
||||
x[1][2][3]. Each indexed item must be a map, slice, or array.
|
||||
js
|
||||
Returns the escaped JavaScript equivalent of the textual
|
||||
representation of its arguments.
|
||||
len
|
||||
Returns the integer length of its argument.
|
||||
not
|
||||
Returns the boolean negation of its single argument.
|
||||
or
|
||||
Returns the boolean OR of its arguments by returning the
|
||||
first non-empty argument or the last argument, that is,
|
||||
"or x y" behaves as "if x then x else y". All the
|
||||
arguments are evaluated.
|
||||
print
|
||||
An alias for fmt.Sprint
|
||||
printf
|
||||
An alias for fmt.Sprintf
|
||||
println
|
||||
An alias for fmt.Sprintln
|
||||
urlquery
|
||||
Returns the escaped value of the textual representation of
|
||||
its arguments in a form suitable for embedding in a URL query.
|
||||
|
||||
The boolean functions take any zero value to be false and a non-zero
|
||||
value to be true.
|
||||
|
||||
There is also a set of binary comparison operators defined as
|
||||
functions:
|
||||
|
||||
eq
|
||||
Returns the boolean truth of arg1 == arg2
|
||||
ne
|
||||
Returns the boolean truth of arg1 != arg2
|
||||
lt
|
||||
Returns the boolean truth of arg1 < arg2
|
||||
le
|
||||
Returns the boolean truth of arg1 <= arg2
|
||||
gt
|
||||
Returns the boolean truth of arg1 > arg2
|
||||
ge
|
||||
Returns the boolean truth of arg1 >= arg2
|
||||
|
||||
For simpler multi-way equality tests, eq (only) accepts two or more
|
||||
arguments and compares the second and subsequent to the first,
|
||||
returning in effect
|
||||
|
||||
arg1==arg2 || arg1==arg3 || arg1==arg4 ...
|
||||
|
||||
(Unlike with || in Go, however, eq is a function call and all the
|
||||
arguments will be evaluated.)
|
||||
|
||||
The comparison functions work on basic types only (or named basic
|
||||
types, such as "type Celsius float32"). They implement the Go rules
|
||||
for comparison of values, except that size and exact type are
|
||||
ignored, so any integer value, signed or unsigned, may be compared
|
||||
with any other integer value. (The arithmetic value is compared,
|
||||
not the bit pattern, so all negative integers are less than all
|
||||
unsigned integers.) However, as usual, one may not compare an int
|
||||
with a float32 and so on.
|
||||
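A short sketch of the comparison and indexing functions described above (via the standard text/template, which matches this behavior):

```go
package main

import (
    "os"
    "text/template"
)

func main() {
    const src = `{{if eq (index .Scores "go") 10}}perfect{{else}}score: {{index .Scores "go"}}{{end}}
`
    t := template.Must(template.New("cmp").Parse(src))
    _ = t.Execute(os.Stdout, map[string]interface{}{
        "Scores": map[string]int{"go": 10},
    })
    // Output: perfect
}
```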
|
||||
Associated templates
|
||||
|
||||
Each template is named by a string specified when it is created. Also, each
|
||||
template is associated with zero or more other templates that it may invoke by
|
||||
name; such associations are transitive and form a name space of templates.
|
||||
|
||||
A template may use a template invocation to instantiate another associated
|
||||
template; see the explanation of the "template" action above. The name must be
|
||||
that of a template associated with the template that contains the invocation.
|
||||
|
||||
Nested template definitions
|
||||
|
||||
When parsing a template, another template may be defined and associated with the
|
||||
template being parsed. Template definitions must appear at the top level of the
|
||||
template, much like global variables in a Go program.
|
||||
|
||||
The syntax of such definitions is to surround each template declaration with a
|
||||
"define" and "end" action.
|
||||
|
||||
The define action names the template being created by providing a string
|
||||
constant. Here is a simple example:
|
||||
|
||||
`{{define "T1"}}ONE{{end}}
|
||||
{{define "T2"}}TWO{{end}}
|
||||
{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
|
||||
{{template "T3"}}`
|
||||
|
||||
This defines two templates, T1 and T2, and a third T3 that invokes the other two
|
||||
when it is executed. Finally it invokes T3. If executed this template will
|
||||
produce the text
|
||||
|
||||
ONE TWO
|
||||
|
||||
By construction, a template may reside in only one association. If it's
|
||||
necessary to have a template addressable from multiple associations, the
|
||||
template definition must be parsed multiple times to create distinct *Template
|
||||
values, or must be copied with the Clone or AddParseTree method.
|
||||
|
||||
Parse may be called multiple times to assemble the various associated templates;
|
||||
see the ParseFiles and ParseGlob functions and methods for simple ways to parse
|
||||
related templates stored in files.
|
||||
|
||||
A template may be executed directly or through ExecuteTemplate, which executes
|
||||
an associated template identified by name. To invoke our example above, we
|
||||
might write,
|
||||
|
||||
err := tmpl.Execute(os.Stdout, "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
||||
|
||||
or to invoke a particular template explicitly by name,
|
||||
|
||||
err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
||||
|
||||
*/
|
||||
package template
|
||||
845
vendor/github.com/alecthomas/template/exec.go
generated
vendored
Normal file
@ -0,0 +1,845 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// state represents the state of an execution. It's not part of the
|
||||
// template so that multiple executions of the same template
|
||||
// can execute in parallel.
|
||||
type state struct {
|
||||
tmpl *Template
|
||||
wr io.Writer
|
||||
node parse.Node // current node, for errors
|
||||
vars []variable // push-down stack of variable values.
|
||||
}
|
||||
|
||||
// variable holds the dynamic value of a variable such as $, $x etc.
|
||||
type variable struct {
|
||||
name string
|
||||
value reflect.Value
|
||||
}
|
||||
|
||||
// push pushes a new variable on the stack.
|
||||
func (s *state) push(name string, value reflect.Value) {
|
||||
s.vars = append(s.vars, variable{name, value})
|
||||
}
|
||||
|
||||
// mark returns the length of the variable stack.
|
||||
func (s *state) mark() int {
|
||||
return len(s.vars)
|
||||
}
|
||||
|
||||
// pop pops the variable stack up to the mark.
|
||||
func (s *state) pop(mark int) {
|
||||
s.vars = s.vars[0:mark]
|
||||
}
|
||||
|
||||
// setVar overwrites the top-nth variable on the stack. Used by range iterations.
|
||||
func (s *state) setVar(n int, value reflect.Value) {
|
||||
s.vars[len(s.vars)-n].value = value
|
||||
}
|
||||
|
||||
// varValue returns the value of the named variable.
|
||||
func (s *state) varValue(name string) reflect.Value {
|
||||
for i := s.mark() - 1; i >= 0; i-- {
|
||||
if s.vars[i].name == name {
|
||||
return s.vars[i].value
|
||||
}
|
||||
}
|
||||
s.errorf("undefined variable: %s", name)
|
||||
return zero
|
||||
}
|
||||
|
||||
var zero reflect.Value
|
||||
|
||||
// at marks the state to be on node n, for error reporting.
|
||||
func (s *state) at(node parse.Node) {
|
||||
s.node = node
|
||||
}
|
||||
|
||||
// doublePercent returns the string with %'s replaced by %%, if necessary,
|
||||
// so it can be used safely inside a Printf format string.
|
||||
func doublePercent(str string) string {
|
||||
if strings.Contains(str, "%") {
|
||||
str = strings.Replace(str, "%", "%%", -1)
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (s *state) errorf(format string, args ...interface{}) {
|
||||
name := doublePercent(s.tmpl.Name())
|
||||
if s.node == nil {
|
||||
format = fmt.Sprintf("template: %s: %s", name, format)
|
||||
} else {
|
||||
location, context := s.tmpl.ErrorContext(s.node)
|
||||
format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
|
||||
}
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// errRecover is the handler that turns panics into returns from the top
|
||||
// level of Parse.
|
||||
func errRecover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
switch err := e.(type) {
|
||||
case runtime.Error:
|
||||
panic(e)
|
||||
case error:
|
||||
*errp = err
|
||||
default:
|
||||
panic(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ExecuteTemplate applies the template associated with t that has the given name
|
||||
// to the specified data object and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
|
||||
tmpl := t.tmpl[name]
|
||||
if tmpl == nil {
|
||||
return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
|
||||
}
|
||||
return tmpl.Execute(wr, data)
|
||||
}
|
||||
|
||||
// Execute applies a parsed template to the specified data object,
|
||||
// and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
|
||||
defer errRecover(&err)
|
||||
value := reflect.ValueOf(data)
|
||||
state := &state{
|
||||
tmpl: t,
|
||||
wr: wr,
|
||||
vars: []variable{{"$", value}},
|
||||
}
|
||||
t.init()
|
||||
if t.Tree == nil || t.Root == nil {
|
||||
var b bytes.Buffer
|
||||
for name, tmpl := range t.tmpl {
|
||||
if tmpl.Tree == nil || tmpl.Root == nil {
|
||||
continue
|
||||
}
|
||||
if b.Len() > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
fmt.Fprintf(&b, "%q", name)
|
||||
}
|
||||
var s string
|
||||
if b.Len() > 0 {
|
||||
s = "; defined templates are: " + b.String()
|
||||
}
|
||||
state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
|
||||
}
|
||||
state.walk(value, t.Root)
|
||||
return
|
||||
}
|
||||
|
||||
// Walk functions step through the major pieces of the template structure,
|
||||
// generating output as they go.
|
||||
func (s *state) walk(dot reflect.Value, node parse.Node) {
|
||||
s.at(node)
|
||||
switch node := node.(type) {
|
||||
case *parse.ActionNode:
|
||||
// Do not pop variables so they persist until next end.
|
||||
// Also, if the action declares variables, don't print the result.
|
||||
val := s.evalPipeline(dot, node.Pipe)
|
||||
if len(node.Pipe.Decl) == 0 {
|
||||
s.printValue(node, val)
|
||||
}
|
||||
case *parse.IfNode:
|
||||
s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
|
||||
case *parse.ListNode:
|
||||
for _, node := range node.Nodes {
|
||||
s.walk(dot, node)
|
||||
}
|
||||
case *parse.RangeNode:
|
||||
s.walkRange(dot, node)
|
||||
case *parse.TemplateNode:
|
||||
s.walkTemplate(dot, node)
|
||||
case *parse.TextNode:
|
||||
if _, err := s.wr.Write(node.Text); err != nil {
|
||||
s.errorf("%s", err)
|
||||
}
|
||||
case *parse.WithNode:
|
||||
s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
|
||||
default:
|
||||
s.errorf("unknown node: %s", node)
|
||||
}
|
||||
}
|
||||
|
||||
// walkIfOrWith walks an 'if' or 'with' node. The two control structures
|
||||
// are identical in behavior except that 'with' sets dot.
|
||||
func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
|
||||
defer s.pop(s.mark())
|
||||
val := s.evalPipeline(dot, pipe)
|
||||
truth, ok := isTrue(val)
|
||||
if !ok {
|
||||
s.errorf("if/with can't use %v", val)
|
||||
}
|
||||
if truth {
|
||||
if typ == parse.NodeWith {
|
||||
s.walk(val, list)
|
||||
} else {
|
||||
s.walk(dot, list)
|
||||
}
|
||||
} else if elseList != nil {
|
||||
s.walk(dot, elseList)
|
||||
}
|
||||
}
|
||||
|
||||
// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
|
||||
// and whether the value has a meaningful truth value.
|
||||
func isTrue(val reflect.Value) (truth, ok bool) {
|
||||
if !val.IsValid() {
|
||||
// Something like var x interface{}, never set. It's a form of nil.
|
||||
return false, true
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
truth = val.Len() > 0
|
||||
case reflect.Bool:
|
||||
truth = val.Bool()
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
truth = val.Complex() != 0
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
|
||||
truth = !val.IsNil()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
truth = val.Int() != 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
truth = val.Float() != 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
truth = val.Uint() != 0
|
||||
case reflect.Struct:
|
||||
truth = true // Struct values are always true.
|
||||
default:
|
||||
return
|
||||
}
|
||||
return truth, true
|
||||
}
|
||||
|
||||
func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
|
||||
s.at(r)
|
||||
defer s.pop(s.mark())
|
||||
val, _ := indirect(s.evalPipeline(dot, r.Pipe))
|
||||
// mark top of stack before any variables in the body are pushed.
|
||||
mark := s.mark()
|
||||
oneIteration := func(index, elem reflect.Value) {
|
||||
// Set top var (lexically the second if there are two) to the element.
|
||||
if len(r.Pipe.Decl) > 0 {
|
||||
s.setVar(1, elem)
|
||||
}
|
||||
// Set next var (lexically the first if there are two) to the index.
|
||||
if len(r.Pipe.Decl) > 1 {
|
||||
s.setVar(2, index)
|
||||
}
|
||||
s.walk(elem, r.List)
|
||||
s.pop(mark)
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for i := 0; i < val.Len(); i++ {
|
||||
oneIteration(reflect.ValueOf(i), val.Index(i))
|
||||
}
|
||||
return
|
||||
case reflect.Map:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for _, key := range sortKeys(val.MapKeys()) {
|
||||
oneIteration(key, val.MapIndex(key))
|
||||
}
|
||||
return
|
||||
case reflect.Chan:
|
||||
if val.IsNil() {
|
||||
break
|
||||
}
|
||||
i := 0
|
||||
for ; ; i++ {
|
||||
elem, ok := val.Recv()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
oneIteration(reflect.ValueOf(i), elem)
|
||||
}
|
||||
if i == 0 {
|
||||
break
|
||||
}
|
||||
return
|
||||
case reflect.Invalid:
|
||||
break // An invalid value is likely a nil map, etc. and acts like an empty map.
|
||||
default:
|
||||
s.errorf("range can't iterate over %v", val)
|
||||
}
|
||||
if r.ElseList != nil {
|
||||
s.walk(dot, r.ElseList)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
|
||||
s.at(t)
|
||||
tmpl := s.tmpl.tmpl[t.Name]
|
||||
if tmpl == nil {
|
||||
s.errorf("template %q not defined", t.Name)
|
||||
}
|
||||
// Variables declared by the pipeline persist.
|
||||
dot = s.evalPipeline(dot, t.Pipe)
|
||||
newState := *s
|
||||
newState.tmpl = tmpl
|
||||
// No dynamic scoping: template invocations inherit no variables.
|
||||
newState.vars = []variable{{"$", dot}}
|
||||
newState.walk(dot, tmpl.Root)
|
||||
}
|
||||
|
||||
// Eval functions evaluate pipelines, commands, and their elements and extract
|
||||
// values from the data structure by examining fields, calling methods, and so on.
|
||||
// The printing of those values happens only through walk functions.
|
||||
|
||||
// evalPipeline returns the value acquired by evaluating a pipeline. If the
|
||||
// pipeline has a variable declaration, the variable will be pushed on the
|
||||
// stack. Callers should therefore pop the stack after they are finished
|
||||
// executing commands depending on the pipeline value.
|
||||
func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
|
||||
if pipe == nil {
|
||||
return
|
||||
}
|
||||
s.at(pipe)
|
||||
for _, cmd := range pipe.Cmds {
|
||||
value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
|
||||
// If the object has type interface{}, dig down one level to the thing inside.
|
||||
if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
|
||||
value = reflect.ValueOf(value.Interface()) // lovely!
|
||||
}
|
||||
}
|
||||
for _, variable := range pipe.Decl {
|
||||
s.push(variable.Ident[0], value)
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
|
||||
if len(args) > 1 || final.IsValid() {
|
||||
s.errorf("can't give argument to non-function %s", args[0])
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
|
||||
firstWord := cmd.Args[0]
|
||||
switch n := firstWord.(type) {
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, cmd.Args, final)
|
||||
case *parse.ChainNode:
|
||||
return s.evalChainNode(dot, n, cmd.Args, final)
|
||||
case *parse.IdentifierNode:
|
||||
// Must be a function.
|
||||
return s.evalFunction(dot, n, cmd, cmd.Args, final)
|
||||
case *parse.PipeNode:
|
||||
// Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
|
||||
return s.evalPipeline(dot, n)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, cmd.Args, final)
|
||||
}
|
||||
s.at(firstWord)
|
||||
s.notAFunction(cmd.Args, final)
|
||||
switch word := firstWord.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(word.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.NilNode:
|
||||
s.errorf("nil is not a command")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(word)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(word.Text)
|
||||
}
|
||||
s.errorf("can't evaluate command %q", firstWord)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// idealConstant is called to return the value of a number in a context where
|
||||
// we don't know the type. In that case, the syntax of the number tells us
|
||||
// its type, and we use Go rules to resolve. Note there is no such thing as
|
||||
// a uint ideal constant in this situation - the value must be of int type.
|
||||
func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
|
||||
// These are ideal constants but we don't know the type
|
||||
// and we have no context. (If it was a method argument,
|
||||
// we'd know what we need.) The syntax guides us to some extent.
|
||||
s.at(constant)
|
||||
switch {
|
||||
case constant.IsComplex:
|
||||
return reflect.ValueOf(constant.Complex128) // incontrovertible.
|
||||
case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
|
||||
return reflect.ValueOf(constant.Float64)
|
||||
case constant.IsInt:
|
||||
n := int(constant.Int64)
|
||||
if int64(n) != constant.Int64 {
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return reflect.ValueOf(n)
|
||||
case constant.IsUint:
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return zero
|
||||
}
|
||||
|
||||
func isHexConstant(s string) bool {
|
||||
return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
|
||||
}
|
||||
|
||||
func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(field)
|
||||
return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(chain)
|
||||
// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
|
||||
pipe := s.evalArg(dot, nil, chain.Node)
|
||||
if len(chain.Field) == 0 {
|
||||
s.errorf("internal error: no fields in evalChainNode")
|
||||
}
|
||||
return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
// $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
|
||||
s.at(variable)
|
||||
value := s.varValue(variable.Ident[0])
|
||||
if len(variable.Ident) == 1 {
|
||||
s.notAFunction(args, final)
|
||||
return value
|
||||
}
|
||||
return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
|
||||
}
|
||||
|
||||
// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
|
||||
// dot is the environment in which to evaluate arguments, while
|
||||
// receiver is the value being walked along the chain.
|
||||
func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
n := len(ident)
|
||||
for i := 0; i < n-1; i++ {
|
||||
receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
|
||||
}
|
||||
// Now if it's a method, it gets the arguments.
|
||||
return s.evalField(dot, ident[n-1], node, args, final, receiver)
|
||||
}
|
||||
|
||||
func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(node)
|
||||
name := node.Ident
|
||||
function, ok := findFunction(name, s.tmpl)
|
||||
if !ok {
|
||||
s.errorf("%q is not a defined function", name)
|
||||
}
|
||||
return s.evalCall(dot, function, cmd, name, args, final)
|
||||
}
|
||||
|
||||
// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
|
||||
// The 'final' argument represents the return value from the preceding
|
||||
// value of the pipeline, if any.
|
||||
func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
|
||||
if !receiver.IsValid() {
|
||||
return zero
|
||||
}
|
||||
typ := receiver.Type()
|
||||
receiver, _ = indirect(receiver)
|
||||
// Unless it's an interface, need to get to a value of type *T to guarantee
|
||||
// we see all methods of T and *T.
|
||||
ptr := receiver
|
||||
if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
|
||||
ptr = ptr.Addr()
|
||||
}
|
||||
if method := ptr.MethodByName(fieldName); method.IsValid() {
|
||||
return s.evalCall(dot, method, node, fieldName, args, final)
|
||||
}
|
||||
hasArgs := len(args) > 1 || final.IsValid()
|
||||
// It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
|
||||
receiver, isNil := indirect(receiver)
|
||||
if isNil {
|
||||
s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
|
||||
}
|
||||
switch receiver.Kind() {
|
||||
case reflect.Struct:
|
||||
tField, ok := receiver.Type().FieldByName(fieldName)
|
||||
if ok {
|
||||
field := receiver.FieldByIndex(tField.Index)
|
||||
if tField.PkgPath != "" { // field is unexported
|
||||
s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
|
||||
}
|
||||
// If it's a function, we must call it.
|
||||
if hasArgs {
|
||||
s.errorf("%s has arguments but cannot be invoked as function", fieldName)
|
||||
}
|
||||
return field
|
||||
}
|
||||
s.errorf("%s is not a field of struct type %s", fieldName, typ)
|
||||
case reflect.Map:
|
||||
// If it's a map, attempt to use the field name as a key.
|
||||
nameVal := reflect.ValueOf(fieldName)
|
||||
if nameVal.Type().AssignableTo(receiver.Type().Key()) {
|
||||
if hasArgs {
|
||||
s.errorf("%s is not a method but has arguments", fieldName)
|
||||
}
|
||||
return receiver.MapIndex(nameVal)
|
||||
}
|
||||
}
|
||||
s.errorf("can't evaluate field %s in type %s", fieldName, typ)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
var (
|
||||
errorType = reflect.TypeOf((*error)(nil)).Elem()
|
||||
fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
|
||||
)
|
||||
|
||||
// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
|
||||
// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
|
||||
// as the function itself.
|
||||
func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
if args != nil {
|
||||
args = args[1:] // Zeroth arg is function name/node; not passed to function.
|
||||
}
|
||||
typ := fun.Type()
|
||||
numIn := len(args)
|
||||
if final.IsValid() {
|
||||
numIn++
|
||||
}
|
||||
numFixed := len(args)
|
||||
if typ.IsVariadic() {
|
||||
numFixed = typ.NumIn() - 1 // last arg is the variadic one.
|
||||
if numIn < numFixed {
|
||||
s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
|
||||
}
|
||||
} else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
|
||||
s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
// TODO: This could still be a confusing error; maybe goodFunc should provide info.
|
||||
s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
|
||||
}
|
||||
// Build the arg list.
|
||||
argv := make([]reflect.Value, numIn)
|
||||
// Args must be evaluated. Fixed args first.
|
||||
i := 0
|
||||
for ; i < numFixed && i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, typ.In(i), args[i])
|
||||
}
|
||||
// Now the ... args.
|
||||
if typ.IsVariadic() {
|
||||
argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
|
||||
for ; i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, argType, args[i])
|
||||
}
|
||||
}
|
||||
// Add final value if necessary.
|
||||
if final.IsValid() {
|
||||
t := typ.In(typ.NumIn() - 1)
|
||||
if typ.IsVariadic() {
|
||||
t = t.Elem()
|
||||
}
|
||||
argv[i] = s.validateType(final, t)
|
||||
}
|
||||
result := fun.Call(argv)
|
||||
// If we have an error that is not nil, stop execution and return that error to the caller.
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
s.at(node)
|
||||
s.errorf("error calling %s: %s", name, result[1].Interface().(error))
|
||||
}
|
||||
return result[0]
|
||||
}
|
||||
|
||||
// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
|
||||
func canBeNil(typ reflect.Type) bool {
|
||||
switch typ.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// validateType guarantees that the value is valid and assignable to the type.
|
||||
func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
|
||||
if !value.IsValid() {
|
||||
if typ == nil || canBeNil(typ) {
|
||||
// An untyped nil interface{}. Accept as a proper nil value.
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("invalid value; expected %s", typ)
|
||||
}
|
||||
if typ != nil && !value.Type().AssignableTo(typ) {
|
||||
if value.Kind() == reflect.Interface && !value.IsNil() {
|
||||
value = value.Elem()
|
||||
if value.Type().AssignableTo(typ) {
|
||||
return value
|
||||
}
|
||||
// fallthrough
|
||||
}
|
||||
// Does one dereference or indirection work? We could do more, as we
|
||||
// do with method receivers, but that gets messy and method receivers
|
||||
// are much more constrained, so it makes more sense there than here.
|
||||
// Besides, one is almost always all you need.
|
||||
switch {
|
||||
case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
|
||||
value = value.Elem()
|
||||
if !value.IsValid() {
|
||||
s.errorf("dereference of nil pointer of type %s", typ)
|
||||
}
|
||||
case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
|
||||
value = value.Addr()
|
||||
default:
|
||||
s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
|
||||
}
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch arg := n.(type) {
|
||||
case *parse.DotNode:
|
||||
return s.validateType(dot, typ)
|
||||
case *parse.NilNode:
|
||||
if canBeNil(typ) {
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("cannot assign nil to %s", typ)
|
||||
case *parse.FieldNode:
|
||||
return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
|
||||
case *parse.VariableNode:
|
||||
return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
|
||||
case *parse.PipeNode:
|
||||
return s.validateType(s.evalPipeline(dot, arg), typ)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, arg, arg, nil, zero)
|
||||
case *parse.ChainNode:
|
||||
return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
|
||||
}
|
||||
switch typ.Kind() {
|
||||
case reflect.Bool:
|
||||
return s.evalBool(typ, n)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return s.evalComplex(typ, n)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return s.evalFloat(typ, n)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return s.evalInteger(typ, n)
|
||||
case reflect.Interface:
|
||||
if typ.NumMethod() == 0 {
|
||||
return s.evalEmptyInterface(dot, n)
|
||||
}
|
||||
case reflect.String:
|
||||
return s.evalString(typ, n)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return s.evalUnsignedInteger(typ, n)
|
||||
}
|
||||
s.errorf("can't handle %s for arg of type %s", n, typ)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.BoolNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetBool(n.True)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected bool; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.StringNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetString(n.Text)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected string; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetInt(n.Int64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetUint(n.Uint64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected unsigned integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetFloat(n.Float64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected float; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetComplex(n.Complex128)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected complex; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch n := n.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(n.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, nil, zero)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, n, n, nil, zero)
|
||||
case *parse.NilNode:
|
||||
// NilNode is handled in evalArg, the only place that calls here.
|
||||
s.errorf("evalEmptyInterface: nil (can't happen)")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(n)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(n.Text)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, nil, zero)
|
||||
case *parse.PipeNode:
|
||||
return s.evalPipeline(dot, n)
|
||||
}
|
||||
s.errorf("can't handle assignment of %s to empty interface argument", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
|
||||
// We indirect through pointers and empty interfaces (only) because
|
||||
// non-empty interfaces have methods we might need.
|
||||
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
|
||||
for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
|
||||
if v.IsNil() {
|
||||
return v, true
|
||||
}
|
||||
if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return v, false
|
||||
}
|
||||
|
||||
// printValue writes the textual representation of the value to the output of
|
||||
// the template.
|
||||
func (s *state) printValue(n parse.Node, v reflect.Value) {
|
||||
s.at(n)
|
||||
iface, ok := printableValue(v)
|
||||
if !ok {
|
||||
s.errorf("can't print %s of type %s", n, v.Type())
|
||||
}
|
||||
fmt.Fprint(s.wr, iface)
|
||||
}
|
||||
|
||||
// printableValue returns the, possibly indirected, interface value inside v that
|
||||
// is best for a call to formatted printer.
|
||||
func printableValue(v reflect.Value) (interface{}, bool) {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v, _ = indirect(v) // fmt.Fprint handles nil.
|
||||
}
|
||||
if !v.IsValid() {
|
||||
return "<no value>", true
|
||||
}
|
||||
|
||||
if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
|
||||
if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
|
||||
v = v.Addr()
|
||||
} else {
|
||||
switch v.Kind() {
|
||||
case reflect.Chan, reflect.Func:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
}
|
||||
return v.Interface(), true
|
||||
}
|
||||
|
||||
// Types to help sort the keys in a map for reproducible output.
|
||||
|
||||
type rvs []reflect.Value
|
||||
|
||||
func (x rvs) Len() int { return len(x) }
|
||||
func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
type rvInts struct{ rvs }
|
||||
|
||||
func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }
|
||||
|
||||
type rvUints struct{ rvs }
|
||||
|
||||
func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }
|
||||
|
||||
type rvFloats struct{ rvs }
|
||||
|
||||
func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }
|
||||
|
||||
type rvStrings struct{ rvs }
|
||||
|
||||
func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
|
||||
|
||||
// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
|
||||
func sortKeys(v []reflect.Value) []reflect.Value {
|
||||
if len(v) <= 1 {
|
||||
return v
|
||||
}
|
||||
switch v[0].Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
sort.Sort(rvFloats{v})
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
sort.Sort(rvInts{v})
|
||||
case reflect.String:
|
||||
sort.Sort(rvStrings{v})
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
sort.Sort(rvUints{v})
|
||||
}
|
||||
return v
|
||||
}
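sortKeys is what makes {{range}} over a map deterministic; a small sketch of the visible effect, using the standard text/template API that this vendored package mirrors (the map contents are invented):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Map keys are visited in sorted order, not in Go's randomized map order.
	t := template.Must(template.New("m").Parse(`{{range $k, $v := .}}{{$k}}={{$v}} {{end}}`))
	_ = t.Execute(os.Stdout, map[string]int{"socks": 1080, "http": 8080, "kcp": 4000})
	// Keys print in sorted order: http, kcp, socks.
}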
|
||||
vendor/github.com/alecthomas/template/funcs.go (generated, vendored, new file, 598 lines) @@ -0,0 +1,598 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// FuncMap is the type of the map defining the mapping from names to functions.
|
||||
// Each function must have either a single return value, or two return values of
|
||||
// which the second has type error. In that case, if the second (error)
|
||||
// return value evaluates to non-nil during execution, execution terminates and
|
||||
// Execute returns that error.
|
||||
type FuncMap map[string]interface{}
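As a quick illustration of how a FuncMap is consumed, here is a minimal sketch using the standard text/template API, which this vendored package mirrors (the upper helper and the template text are invented):

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Each entry must have one result, or two results with the second an error
	// (enforced by goodFunc below).
	funcs := template.FuncMap{
		"upper": strings.ToUpper,
	}
	t := template.Must(template.New("demo").Funcs(funcs).Parse(`{{upper .Name}}`))
	_ = t.Execute(os.Stdout, struct{ Name string }{"proxy"}) // prints PROXY
}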
|
||||
|
||||
var builtins = FuncMap{
|
||||
"and": and,
|
||||
"call": call,
|
||||
"html": HTMLEscaper,
|
||||
"index": index,
|
||||
"js": JSEscaper,
|
||||
"len": length,
|
||||
"not": not,
|
||||
"or": or,
|
||||
"print": fmt.Sprint,
|
||||
"printf": fmt.Sprintf,
|
||||
"println": fmt.Sprintln,
|
||||
"urlquery": URLQueryEscaper,
|
||||
|
||||
// Comparisons
|
||||
"eq": eq, // ==
|
||||
"ge": ge, // >=
|
||||
"gt": gt, // >
|
||||
"le": le, // <=
|
||||
"lt": lt, // <
|
||||
"ne": ne, // !=
|
||||
}
|
||||
|
||||
var builtinFuncs = createValueFuncs(builtins)
|
||||
|
||||
// createValueFuncs turns a FuncMap into a map[string]reflect.Value
|
||||
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
|
||||
m := make(map[string]reflect.Value)
|
||||
addValueFuncs(m, funcMap)
|
||||
return m
|
||||
}
|
||||
|
||||
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
|
||||
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
v := reflect.ValueOf(fn)
|
||||
if v.Kind() != reflect.Func {
|
||||
panic("value for " + name + " not a function")
|
||||
}
|
||||
if !goodFunc(v.Type()) {
|
||||
panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
|
||||
}
|
||||
out[name] = v
|
||||
}
|
||||
}
|
||||
|
||||
// addFuncs adds to values the functions in funcs. It does no checking of the input -
|
||||
// call addValueFuncs first.
|
||||
func addFuncs(out, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
out[name] = fn
|
||||
}
|
||||
}
|
||||
|
||||
// goodFunc checks that the function or method has the right result signature.
|
||||
func goodFunc(typ reflect.Type) bool {
|
||||
// We allow functions with 1 result or 2 results where the second is an error.
|
||||
switch {
|
||||
case typ.NumOut() == 1:
|
||||
return true
|
||||
case typ.NumOut() == 2 && typ.Out(1) == errorType:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// findFunction looks for a function in the template, and global map.
|
||||
func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
|
||||
if tmpl != nil && tmpl.common != nil {
|
||||
if fn := tmpl.execFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
}
|
||||
if fn := builtinFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
return reflect.Value{}, false
|
||||
}
|
||||
|
||||
// Indexing.
|
||||
|
||||
// index returns the result of indexing its first argument by the following
|
||||
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
|
||||
// indexed item must be a map, slice, or array.
|
||||
func index(item interface{}, indices ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(item)
|
||||
for _, i := range indices {
|
||||
index := reflect.ValueOf(i)
|
||||
var isNil bool
|
||||
if v, isNil = indirect(v); isNil {
|
||||
return nil, fmt.Errorf("index of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.String:
|
||||
var x int64
|
||||
switch index.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
x = index.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
x = int64(index.Uint())
|
||||
default:
|
||||
return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
|
||||
}
|
||||
if x < 0 || x >= int64(v.Len()) {
|
||||
return nil, fmt.Errorf("index out of range: %d", x)
|
||||
}
|
||||
v = v.Index(int(x))
|
||||
case reflect.Map:
|
||||
if !index.IsValid() {
|
||||
index = reflect.Zero(v.Type().Key())
|
||||
}
|
||||
if !index.Type().AssignableTo(v.Type().Key()) {
|
||||
return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
|
||||
}
|
||||
if x := v.MapIndex(index); x.IsValid() {
|
||||
v = x
|
||||
} else {
|
||||
v = reflect.Zero(v.Type().Elem())
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("can't index item of type %s", v.Type())
|
||||
}
|
||||
}
|
||||
return v.Interface(), nil
|
||||
}
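The index builtin is normally driven from template text rather than called directly; a minimal sketch (template text and data are invented):

package main

import (
	"os"
	"text/template"
)

func main() {
	// index walks one subscript at a time: here a slice index, then a map key.
	const text = `{{index .Ports 1}} {{index .Limits "socks"}}`
	t := template.Must(template.New("idx").Parse(text))
	data := struct {
		Ports  []int
		Limits map[string]int
	}{Ports: []int{80, 443, 8080}, Limits: map[string]int{"socks": 100}}
	_ = t.Execute(os.Stdout, data) // prints: 443 100
}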
|
||||
|
||||
// Length
|
||||
|
||||
// length returns the length of the item, with an error if it has no defined length.
|
||||
func length(item interface{}) (int, error) {
|
||||
v, isNil := indirect(reflect.ValueOf(item))
|
||||
if isNil {
|
||||
return 0, fmt.Errorf("len of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len(), nil
|
||||
}
|
||||
return 0, fmt.Errorf("len of type %s", v.Type())
|
||||
}
|
||||
|
||||
// Function invocation
|
||||
|
||||
// call returns the result of evaluating the first argument as a function.
|
||||
// The function must return 1 result, or 2 results, the second of which is an error.
|
||||
func call(fn interface{}, args ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(fn)
|
||||
typ := v.Type()
|
||||
if typ.Kind() != reflect.Func {
|
||||
return nil, fmt.Errorf("non-function of type %s", typ)
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
|
||||
}
|
||||
numIn := typ.NumIn()
|
||||
var dddType reflect.Type
|
||||
if typ.IsVariadic() {
|
||||
if len(args) < numIn-1 {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
|
||||
}
|
||||
dddType = typ.In(numIn - 1).Elem()
|
||||
} else {
|
||||
if len(args) != numIn {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
|
||||
}
|
||||
}
|
||||
argv := make([]reflect.Value, len(args))
|
||||
for i, arg := range args {
|
||||
value := reflect.ValueOf(arg)
|
||||
// Compute the expected type. Clumsy because of variadics.
|
||||
var argType reflect.Type
|
||||
if !typ.IsVariadic() || i < numIn-1 {
|
||||
argType = typ.In(i)
|
||||
} else {
|
||||
argType = dddType
|
||||
}
|
||||
if !value.IsValid() && canBeNil(argType) {
|
||||
value = reflect.Zero(argType)
|
||||
}
|
||||
if !value.Type().AssignableTo(argType) {
|
||||
return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
|
||||
}
|
||||
argv[i] = value
|
||||
}
|
||||
result := v.Call(argv)
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
return result[0].Interface(), result[1].Interface().(error)
|
||||
}
|
||||
return result[0].Interface(), nil
|
||||
}
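For completeness, a sketch of driving the call builtin from template text (the Add field is invented):

package main

import (
	"os"
	"text/template"
)

func main() {
	// {{call .Add 2 3}} invokes the function value held in the data; the function
	// must return one result, or two with the second an error.
	t := template.Must(template.New("call").Parse(`{{call .Add 2 3}}`))
	data := struct{ Add func(a, b int) int }{Add: func(a, b int) int { return a + b }}
	_ = t.Execute(os.Stdout, data) // prints: 5
}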
|
||||
|
||||
// Boolean logic.
|
||||
|
||||
func truth(a interface{}) bool {
|
||||
t, _ := isTrue(reflect.ValueOf(a))
|
||||
return t
|
||||
}
|
||||
|
||||
// and computes the Boolean AND of its arguments, returning
|
||||
// the first false argument it encounters, or the last argument.
|
||||
func and(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if !truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if !truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// or computes the Boolean OR of its arguments, returning
|
||||
// the first true argument it encounters, or the last argument.
|
||||
func or(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// not returns the Boolean negation of its argument.
|
||||
func not(arg interface{}) (truth bool) {
|
||||
truth, _ = isTrue(reflect.ValueOf(arg))
|
||||
return !truth
|
||||
}
|
||||
|
||||
// Comparison.
|
||||
|
||||
// TODO: Perhaps allow comparison between signed and unsigned integers.
|
||||
|
||||
var (
|
||||
errBadComparisonType = errors.New("invalid type for comparison")
|
||||
errBadComparison = errors.New("incompatible types for comparison")
|
||||
errNoComparison = errors.New("missing argument for comparison")
|
||||
)
|
||||
|
||||
type kind int
|
||||
|
||||
const (
|
||||
invalidKind kind = iota
|
||||
boolKind
|
||||
complexKind
|
||||
intKind
|
||||
floatKind
|
||||
integerKind
|
||||
stringKind
|
||||
uintKind
|
||||
)
|
||||
|
||||
func basicKind(v reflect.Value) (kind, error) {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return boolKind, nil
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return intKind, nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return uintKind, nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return floatKind, nil
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return complexKind, nil
|
||||
case reflect.String:
|
||||
return stringKind, nil
|
||||
}
|
||||
return invalidKind, errBadComparisonType
|
||||
}
|
||||
|
||||
// eq evaluates the comparison a == b || a == c || ...
|
||||
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(arg2) == 0 {
|
||||
return false, errNoComparison
|
||||
}
|
||||
for _, arg := range arg2 {
|
||||
v2 := reflect.ValueOf(arg)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind:
|
||||
truth = v1.Bool() == v2.Bool()
|
||||
case complexKind:
|
||||
truth = v1.Complex() == v2.Complex()
|
||||
case floatKind:
|
||||
truth = v1.Float() == v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() == v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() == v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() == v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
if truth {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// ne evaluates the comparison a != b.
|
||||
func ne(arg1, arg2 interface{}) (bool, error) {
|
||||
// != is the inverse of ==.
|
||||
equal, err := eq(arg1, arg2)
|
||||
return !equal, err
|
||||
}
|
||||
|
||||
// lt evaluates the comparison a < b.
|
||||
func lt(arg1, arg2 interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
v2 := reflect.ValueOf(arg2)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind, complexKind:
|
||||
return false, errBadComparisonType
|
||||
case floatKind:
|
||||
truth = v1.Float() < v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() < v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() < v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() < v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
return truth, nil
|
||||
}
|
||||
|
||||
// le evaluates the comparison a <= b.
|
||||
func le(arg1, arg2 interface{}) (bool, error) {
|
||||
// <= is < or ==.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if lessThan || err != nil {
|
||||
return lessThan, err
|
||||
}
|
||||
return eq(arg1, arg2)
|
||||
}
|
||||
|
||||
// gt evaluates the comparison a > b.
|
||||
func gt(arg1, arg2 interface{}) (bool, error) {
|
||||
// > is the inverse of <=.
|
||||
lessOrEqual, err := le(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessOrEqual, nil
|
||||
}
|
||||
|
||||
// ge evaluates the comparison a >= b.
|
||||
func ge(arg1, arg2 interface{}) (bool, error) {
|
||||
// >= is the inverse of <.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessThan, nil
|
||||
}
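A small sketch of the comparison builtins from template text, including the signed/unsigned special case handled above (the values are invented):

package main

import (
	"os"
	"text/template"
)

func main() {
	// eq/ne/lt/le/gt/ge accept only basic kinds; mixing int and uint works
	// because of the sign-aware branches in eq and lt.
	const text = `{{eq .I .U}} {{lt .I 10}} {{ge .S "abc"}}`
	t := template.Must(template.New("cmp").Parse(text))
	data := struct {
		I int
		U uint
		S string
	}{I: 7, U: 7, S: "proxy"}
	_ = t.Execute(os.Stdout, data) // prints: true true true
}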
|
||||
|
||||
// HTML escaping.
|
||||
|
||||
var (
|
||||
htmlQuot = []byte(""") // shorter than """
|
||||
htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5
|
||||
htmlAmp = []byte("&")
|
||||
htmlLt = []byte("<")
|
||||
htmlGt = []byte(">")
|
||||
)
|
||||
|
||||
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
|
||||
func HTMLEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i, c := range b {
|
||||
var html []byte
|
||||
switch c {
|
||||
case '"':
|
||||
html = htmlQuot
|
||||
case '\'':
|
||||
html = htmlApos
|
||||
case '&':
|
||||
html = htmlAmp
|
||||
case '<':
|
||||
html = htmlLt
|
||||
case '>':
|
||||
html = htmlGt
|
||||
default:
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
w.Write(html)
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
|
||||
func HTMLEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexAny(s, `'"&<>`) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
HTMLEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// HTMLEscaper returns the escaped HTML equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func HTMLEscaper(args ...interface{}) string {
|
||||
return HTMLEscapeString(evalArgs(args))
|
||||
}
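A minimal sketch of the HTML escaping helpers called directly, via the matching functions exported by the standard text/template package (the input strings are invented):

package main

import (
	"fmt"
	"text/template"
)

func main() {
	fmt.Println(template.HTMLEscapeString(`<a href="x">O'Neil & co</a>`))
	// &lt;a href=&#34;x&#34;&gt;O&#39;Neil &amp; co&lt;/a&gt;
	fmt.Println(template.HTMLEscaper("<b>", 42)) // arguments are formatted, then escaped: &lt;b&gt;42
}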
|
||||
|
||||
// JavaScript escaping.
|
||||
|
||||
var (
|
||||
jsLowUni = []byte(`\u00`)
|
||||
hex = []byte("0123456789ABCDEF")
|
||||
|
||||
jsBackslash = []byte(`\\`)
|
||||
jsApos = []byte(`\'`)
|
||||
jsQuot = []byte(`\"`)
|
||||
jsLt = []byte(`\x3C`)
|
||||
jsGt = []byte(`\x3E`)
|
||||
)
|
||||
|
||||
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
|
||||
func JSEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i := 0; i < len(b); i++ {
|
||||
c := b[i]
|
||||
|
||||
if !jsIsSpecial(rune(c)) {
|
||||
// fast path: nothing to do
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
|
||||
if c < utf8.RuneSelf {
|
||||
// Quotes, slashes and angle brackets get quoted.
|
||||
// Control characters get written as \u00XX.
|
||||
switch c {
|
||||
case '\\':
|
||||
w.Write(jsBackslash)
|
||||
case '\'':
|
||||
w.Write(jsApos)
|
||||
case '"':
|
||||
w.Write(jsQuot)
|
||||
case '<':
|
||||
w.Write(jsLt)
|
||||
case '>':
|
||||
w.Write(jsGt)
|
||||
default:
|
||||
w.Write(jsLowUni)
|
||||
t, b := c>>4, c&0x0f
|
||||
w.Write(hex[t : t+1])
|
||||
w.Write(hex[b : b+1])
|
||||
}
|
||||
} else {
|
||||
// Unicode rune.
|
||||
r, size := utf8.DecodeRune(b[i:])
|
||||
if unicode.IsPrint(r) {
|
||||
w.Write(b[i : i+size])
|
||||
} else {
|
||||
fmt.Fprintf(w, "\\u%04X", r)
|
||||
}
|
||||
i += size - 1
|
||||
}
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
|
||||
func JSEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexFunc(s, jsIsSpecial) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
JSEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func jsIsSpecial(r rune) bool {
|
||||
switch r {
|
||||
case '\\', '\'', '"', '<', '>':
|
||||
return true
|
||||
}
|
||||
return r < ' ' || utf8.RuneSelf <= r
|
||||
}
|
||||
|
||||
// JSEscaper returns the escaped JavaScript equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func JSEscaper(args ...interface{}) string {
|
||||
return JSEscapeString(evalArgs(args))
|
||||
}
|
||||
|
||||
// URLQueryEscaper returns the escaped value of the textual representation of
|
||||
// its arguments in a form suitable for embedding in a URL query.
|
||||
func URLQueryEscaper(args ...interface{}) string {
|
||||
return url.QueryEscape(evalArgs(args))
|
||||
}
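And a matching sketch for the JavaScript and URL-query escapers (the inputs are invented):

package main

import (
	"fmt"
	"text/template"
)

func main() {
	fmt.Println(template.JSEscapeString(`alert("hi")</script>`))
	// alert(\"hi\")\x3C/script\x3E
	fmt.Println(template.URLQueryEscaper("user name", "=", 1))
	// url.QueryEscape of the formatted arguments: user+name%3D1
}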
|
||||
|
||||
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
|
||||
// fmt.Sprint(args...)
|
||||
// except that each argument is indirected (if a pointer), as required,
|
||||
// using the same rules as the default string evaluation during template
|
||||
// execution.
|
||||
func evalArgs(args []interface{}) string {
|
||||
ok := false
|
||||
var s string
|
||||
// Fast path for simple common case.
|
||||
if len(args) == 1 {
|
||||
s, ok = args[0].(string)
|
||||
}
|
||||
if !ok {
|
||||
for i, arg := range args {
|
||||
a, ok := printableValue(reflect.ValueOf(arg))
|
||||
if ok {
|
||||
args[i] = a
|
||||
} // else let fmt do its thing
|
||||
}
|
||||
s = fmt.Sprint(args...)
|
||||
}
|
||||
return s
|
||||
}
|
||||
vendor/github.com/alecthomas/template/helper.go (generated, vendored, new file, 108 lines) @@ -0,0 +1,108 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Helper functions to make constructing templates easier.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Functions and methods to parse templates.
|
||||
|
||||
// Must is a helper that wraps a call to a function returning (*Template, error)
|
||||
// and panics if the error is non-nil. It is intended for use in variable
|
||||
// initializations such as
|
||||
// var t = template.Must(template.New("name").Parse("text"))
|
||||
func Must(t *Template, err error) *Template {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ParseFiles creates a new Template and parses the template definitions from
|
||||
// the named files. The returned template's name will have the (base) name and
|
||||
// (parsed) contents of the first file. There must be at least one file.
|
||||
// If an error occurs, parsing stops and the returned *Template is nil.
|
||||
func ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(nil, filenames...)
|
||||
}
|
||||
|
||||
// ParseFiles parses the named files and associates the resulting templates with
|
||||
// t. If an error occurs, parsing stops and the returned template is nil;
|
||||
// otherwise it is t. There must be at least one file.
|
||||
func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(t, filenames...)
|
||||
}
|
||||
|
||||
// parseFiles is the helper for the method and function. If the argument
|
||||
// template is nil, it is created from the first file.
|
||||
func parseFiles(t *Template, filenames ...string) (*Template, error) {
|
||||
if len(filenames) == 0 {
|
||||
// Not really a problem, but be consistent.
|
||||
return nil, fmt.Errorf("template: no files named in call to ParseFiles")
|
||||
}
|
||||
for _, filename := range filenames {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := string(b)
|
||||
name := filepath.Base(filename)
|
||||
// First template becomes return value if not already defined,
|
||||
// and we use that one for subsequent New calls to associate
|
||||
// all the templates together. Also, if this file has the same name
|
||||
// as t, this file becomes the contents of t, so
|
||||
// t, err := New(name).Funcs(xxx).ParseFiles(name)
|
||||
// works. Otherwise we create a new template associated with t.
|
||||
var tmpl *Template
|
||||
if t == nil {
|
||||
t = New(name)
|
||||
}
|
||||
if name == t.Name() {
|
||||
tmpl = t
|
||||
} else {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
_, err = tmpl.Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// ParseGlob creates a new Template and parses the template definitions from the
|
||||
// files identified by the pattern, which must match at least one file. The
|
||||
// returned template will have the (base) name and (parsed) contents of the
|
||||
// first file matched by the pattern. ParseGlob is equivalent to calling
|
||||
// ParseFiles with the list of files matched by the pattern.
|
||||
func ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(nil, pattern)
|
||||
}
|
||||
|
||||
// ParseGlob parses the template definitions in the files identified by the
|
||||
// pattern and associates the resulting templates with t. The pattern is
|
||||
// processed by filepath.Glob and must match at least one file. ParseGlob is
|
||||
// equivalent to calling t.ParseFiles with the list of files matched by the
|
||||
// pattern.
|
||||
func (t *Template) ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(t, pattern)
|
||||
}
|
||||
|
||||
// parseGlob is the implementation of the function and method ParseGlob.
|
||||
func parseGlob(t *Template, pattern string) (*Template, error) {
|
||||
filenames, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(filenames) == 0 {
|
||||
return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
|
||||
}
|
||||
return parseFiles(t, filenames...)
|
||||
}
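A minimal usage sketch of the helper entry points above, again via the identical standard-library API (the file paths and template name are hypothetical):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Must panics on a parse error, which is convenient for package-level variables.
	// ParseGlob must match at least one file; the first file gives the template its name.
	t := template.Must(template.ParseGlob("templates/*.tmpl")) // hypothetical directory
	_ = t.ExecuteTemplate(os.Stdout, "index.tmpl", nil)        // hypothetical template name
}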
|
||||
vendor/github.com/alecthomas/template/parse/lex.go (generated, vendored, new file, 556 lines) @@ -0,0 +1,556 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// item represents a token or text string returned from the scanner.
|
||||
type item struct {
|
||||
typ itemType // The type of this item.
|
||||
pos Pos // The starting position, in bytes, of this item in the input string.
|
||||
val string // The value of this item.
|
||||
}
|
||||
|
||||
func (i item) String() string {
|
||||
switch {
|
||||
case i.typ == itemEOF:
|
||||
return "EOF"
|
||||
case i.typ == itemError:
|
||||
return i.val
|
||||
case i.typ > itemKeyword:
|
||||
return fmt.Sprintf("<%s>", i.val)
|
||||
case len(i.val) > 10:
|
||||
return fmt.Sprintf("%.10q...", i.val)
|
||||
}
|
||||
return fmt.Sprintf("%q", i.val)
|
||||
}
|
||||
|
||||
// itemType identifies the type of lex items.
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota // error occurred; value is text of error
|
||||
itemBool // boolean constant
|
||||
itemChar // printable ASCII character; grab bag for comma etc.
|
||||
itemCharConstant // character constant
|
||||
itemComplex // complex constant (1+2i); imaginary is just a number
|
||||
itemColonEquals // colon-equals (':=') introducing a declaration
|
||||
itemEOF
|
||||
itemField // alphanumeric identifier starting with '.'
|
||||
itemIdentifier // alphanumeric identifier not starting with '.'
|
||||
itemLeftDelim // left action delimiter
|
||||
itemLeftParen // '(' inside action
|
||||
itemNumber // simple number, including imaginary
|
||||
itemPipe // pipe symbol
|
||||
itemRawString // raw quoted string (includes quotes)
|
||||
itemRightDelim // right action delimiter
|
||||
itemElideNewline // elide newline after right delim
|
||||
itemRightParen // ')' inside action
|
||||
itemSpace // run of spaces separating arguments
|
||||
itemString // quoted string (includes quotes)
|
||||
itemText // plain text
|
||||
itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
|
||||
// Keywords appear after all the rest.
|
||||
itemKeyword // used only to delimit the keywords
|
||||
itemDot // the cursor, spelled '.'
|
||||
itemDefine // define keyword
|
||||
itemElse // else keyword
|
||||
itemEnd // end keyword
|
||||
itemIf // if keyword
|
||||
itemNil // the untyped nil constant, easiest to treat as a keyword
|
||||
itemRange // range keyword
|
||||
itemTemplate // template keyword
|
||||
itemWith // with keyword
|
||||
)
|
||||
|
||||
var key = map[string]itemType{
|
||||
".": itemDot,
|
||||
"define": itemDefine,
|
||||
"else": itemElse,
|
||||
"end": itemEnd,
|
||||
"if": itemIf,
|
||||
"range": itemRange,
|
||||
"nil": itemNil,
|
||||
"template": itemTemplate,
|
||||
"with": itemWith,
|
||||
}
|
||||
|
||||
const eof = -1
|
||||
|
||||
// stateFn represents the state of the scanner as a function that returns the next state.
|
||||
type stateFn func(*lexer) stateFn
|
||||
|
||||
// lexer holds the state of the scanner.
|
||||
type lexer struct {
|
||||
name string // the name of the input; used only for error reports
|
||||
input string // the string being scanned
|
||||
leftDelim string // start of action
|
||||
rightDelim string // end of action
|
||||
state stateFn // the next lexing function to enter
|
||||
pos Pos // current position in the input
|
||||
start Pos // start position of this item
|
||||
width Pos // width of last rune read from input
|
||||
lastPos Pos // position of most recent item returned by nextItem
|
||||
items chan item // channel of scanned items
|
||||
parenDepth int // nesting depth of ( ) exprs
|
||||
}
|
||||
|
||||
// next returns the next rune in the input.
|
||||
func (l *lexer) next() rune {
|
||||
if int(l.pos) >= len(l.input) {
|
||||
l.width = 0
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
l.width = Pos(w)
|
||||
l.pos += l.width
|
||||
return r
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (l *lexer) peek() rune {
|
||||
r := l.next()
|
||||
l.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can only be called once per call of next.
|
||||
func (l *lexer) backup() {
|
||||
l.pos -= l.width
|
||||
}
|
||||
|
||||
// emit passes an item back to the client.
|
||||
func (l *lexer) emit(t itemType) {
|
||||
l.items <- item{t, l.start, l.input[l.start:l.pos]}
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (l *lexer) ignore() {
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's from the valid set.
|
||||
func (l *lexer) accept(valid string) bool {
|
||||
if strings.IndexRune(valid, l.next()) >= 0 {
|
||||
return true
|
||||
}
|
||||
l.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// acceptRun consumes a run of runes from the valid set.
|
||||
func (l *lexer) acceptRun(valid string) {
|
||||
for strings.IndexRune(valid, l.next()) >= 0 {
|
||||
}
|
||||
l.backup()
|
||||
}
|
||||
|
||||
// lineNumber reports which line we're on, based on the position of
|
||||
// the previous item returned by nextItem. Doing it this way
|
||||
// means we don't have to worry about peek double counting.
|
||||
func (l *lexer) lineNumber() int {
|
||||
return 1 + strings.Count(l.input[:l.lastPos], "\n")
|
||||
}
|
||||
|
||||
// errorf returns an error token and terminates the scan by passing
|
||||
// back a nil pointer that will be the next state, terminating l.nextItem.
|
||||
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
|
||||
l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextItem returns the next item from the input.
|
||||
func (l *lexer) nextItem() item {
|
||||
item := <-l.items
|
||||
l.lastPos = item.pos
|
||||
return item
|
||||
}
|
||||
|
||||
// lex creates a new scanner for the input string.
|
||||
func lex(name, input, left, right string) *lexer {
|
||||
if left == "" {
|
||||
left = leftDelim
|
||||
}
|
||||
if right == "" {
|
||||
right = rightDelim
|
||||
}
|
||||
l := &lexer{
|
||||
name: name,
|
||||
input: input,
|
||||
leftDelim: left,
|
||||
rightDelim: right,
|
||||
items: make(chan item),
|
||||
}
|
||||
go l.run()
|
||||
return l
|
||||
}
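lex starts the scanner in its own goroutine and hands tokens back over the items channel; a stripped-down sketch of the same state-function pattern, with invented token and state types:

package main

import "fmt"

type token string

type machine struct {
	out chan token
}

// A state returns the next state, or nil to stop (mirrors stateFn above).
type state func(*machine) state

func start(m *machine) state  { m.out <- "hello"; return finish }
func finish(m *machine) state { m.out <- "eof"; close(m.out); return nil }

func run(m *machine) {
	for s := state(start); s != nil; {
		s = s(m)
	}
}

func main() {
	m := &machine{out: make(chan token)}
	go run(m) // like go l.run() in lex()
	for t := range m.out {
		fmt.Println(t) // the consumer pulls tokens as they are produced (cf. nextItem)
	}
}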
|
||||
|
||||
// run runs the state machine for the lexer.
|
||||
func (l *lexer) run() {
|
||||
for l.state = lexText; l.state != nil; {
|
||||
l.state = l.state(l)
|
||||
}
|
||||
}
|
||||
|
||||
// state functions
|
||||
|
||||
const (
|
||||
leftDelim = "{{"
|
||||
rightDelim = "}}"
|
||||
leftComment = "/*"
|
||||
rightComment = "*/"
|
||||
)
|
||||
|
||||
// lexText scans until an opening action delimiter, "{{".
|
||||
func lexText(l *lexer) stateFn {
|
||||
for {
|
||||
if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
return lexLeftDelim
|
||||
}
|
||||
if l.next() == eof {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Correctly reached EOF.
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
l.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexLeftDelim scans the left delimiter, which is known to be present.
|
||||
func lexLeftDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.leftDelim))
|
||||
if strings.HasPrefix(l.input[l.pos:], leftComment) {
|
||||
return lexComment
|
||||
}
|
||||
l.emit(itemLeftDelim)
|
||||
l.parenDepth = 0
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexComment scans a comment. The left comment marker is known to be present.
|
||||
func lexComment(l *lexer) stateFn {
|
||||
l.pos += Pos(len(leftComment))
|
||||
i := strings.Index(l.input[l.pos:], rightComment)
|
||||
if i < 0 {
|
||||
return l.errorf("unclosed comment")
|
||||
}
|
||||
l.pos += Pos(i + len(rightComment))
|
||||
if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
return l.errorf("comment ends before closing delimiter")
|
||||
|
||||
}
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.ignore()
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexRightDelim scans the right delimiter, which is known to be present.
|
||||
func lexRightDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.emit(itemRightDelim)
|
||||
if l.peek() == '\\' {
|
||||
l.pos++
|
||||
l.emit(itemElideNewline)
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexInsideAction scans the elements inside action delimiters.
|
||||
func lexInsideAction(l *lexer) stateFn {
|
||||
// Either number, quoted string, or identifier.
|
||||
// Spaces separate arguments; runs of spaces turn into itemSpace.
|
||||
// Pipe symbols separate and are emitted.
|
||||
if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
if l.parenDepth == 0 {
|
||||
return lexRightDelim
|
||||
}
|
||||
return l.errorf("unclosed left paren")
|
||||
}
|
||||
switch r := l.next(); {
|
||||
case r == eof || isEndOfLine(r):
|
||||
return l.errorf("unclosed action")
|
||||
case isSpace(r):
|
||||
return lexSpace
|
||||
case r == ':':
|
||||
if l.next() != '=' {
|
||||
return l.errorf("expected :=")
|
||||
}
|
||||
l.emit(itemColonEquals)
|
||||
case r == '|':
|
||||
l.emit(itemPipe)
|
||||
case r == '"':
|
||||
return lexQuote
|
||||
case r == '`':
|
||||
return lexRawQuote
|
||||
case r == '$':
|
||||
return lexVariable
|
||||
case r == '\'':
|
||||
return lexChar
|
||||
case r == '.':
|
||||
// special look-ahead for ".field" so we don't break l.backup().
|
||||
if l.pos < Pos(len(l.input)) {
|
||||
r := l.input[l.pos]
|
||||
if r < '0' || '9' < r {
|
||||
return lexField
|
||||
}
|
||||
}
|
||||
fallthrough // '.' can start a number.
|
||||
case r == '+' || r == '-' || ('0' <= r && r <= '9'):
|
||||
l.backup()
|
||||
return lexNumber
|
||||
case isAlphaNumeric(r):
|
||||
l.backup()
|
||||
return lexIdentifier
|
||||
case r == '(':
|
||||
l.emit(itemLeftParen)
|
||||
l.parenDepth++
|
||||
return lexInsideAction
|
||||
case r == ')':
|
||||
l.emit(itemRightParen)
|
||||
l.parenDepth--
|
||||
if l.parenDepth < 0 {
|
||||
return l.errorf("unexpected right paren %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
case r <= unicode.MaxASCII && unicode.IsPrint(r):
|
||||
l.emit(itemChar)
|
||||
return lexInsideAction
|
||||
default:
|
||||
return l.errorf("unrecognized character in action: %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexSpace scans a run of space characters.
|
||||
// One space has already been seen.
|
||||
func lexSpace(l *lexer) stateFn {
|
||||
for isSpace(l.peek()) {
|
||||
l.next()
|
||||
}
|
||||
l.emit(itemSpace)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexIdentifier scans an alphanumeric.
|
||||
func lexIdentifier(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch r := l.next(); {
|
||||
case isAlphaNumeric(r):
|
||||
// absorb.
|
||||
default:
|
||||
l.backup()
|
||||
word := l.input[l.start:l.pos]
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
switch {
|
||||
case key[word] > itemKeyword:
|
||||
l.emit(key[word])
|
||||
case word[0] == '.':
|
||||
l.emit(itemField)
|
||||
case word == "true", word == "false":
|
||||
l.emit(itemBool)
|
||||
default:
|
||||
l.emit(itemIdentifier)
|
||||
}
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexField scans a field: .Alphanumeric.
|
||||
// The . has been scanned.
|
||||
func lexField(l *lexer) stateFn {
|
||||
return lexFieldOrVariable(l, itemField)
|
||||
}
|
||||
|
||||
// lexVariable scans a Variable: $Alphanumeric.
|
||||
// The $ has been scanned.
|
||||
func lexVariable(l *lexer) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "$".
|
||||
l.emit(itemVariable)
|
||||
return lexInsideAction
|
||||
}
|
||||
return lexFieldOrVariable(l, itemVariable)
|
||||
}
|
||||
|
||||
// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
|
||||
// The . or $ has been scanned.
|
||||
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "." or "$".
|
||||
if typ == itemVariable {
|
||||
l.emit(itemVariable)
|
||||
} else {
|
||||
l.emit(itemDot)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
var r rune
|
||||
for {
|
||||
r = l.next()
|
||||
if !isAlphaNumeric(r) {
|
||||
l.backup()
|
||||
break
|
||||
}
|
||||
}
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
l.emit(typ)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// atTerminator reports whether the input is at valid termination character to
|
||||
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
|
||||
// like "$x+2" not being acceptable without a space, in case we decide one
|
||||
// day to implement arithmetic.
|
||||
func (l *lexer) atTerminator() bool {
|
||||
r := l.peek()
|
||||
if isSpace(r) || isEndOfLine(r) {
|
||||
return true
|
||||
}
|
||||
switch r {
|
||||
case eof, '.', ',', '|', ':', ')', '(':
|
||||
return true
|
||||
}
|
||||
// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
|
||||
// succeed but should fail) but only in extremely rare cases caused by willfully
|
||||
// bad choice of delimiter.
|
||||
if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// lexChar scans a character constant. The initial quote is already
|
||||
// scanned. Syntax checking is done by the parser.
|
||||
func lexChar(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated character constant")
|
||||
case '\'':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemCharConstant)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
|
||||
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
|
||||
// and "089" - but when it's wrong the input is invalid and the parser (via
|
||||
// strconv) will notice.
|
||||
func lexNumber(l *lexer) stateFn {
|
||||
if !l.scanNumber() {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
if sign := l.peek(); sign == '+' || sign == '-' {
|
||||
// Complex: 1+2i. No spaces, must end in 'i'.
|
||||
if !l.scanNumber() || l.input[l.pos-1] != 'i' {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
l.emit(itemComplex)
|
||||
} else {
|
||||
l.emit(itemNumber)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
func (l *lexer) scanNumber() bool {
|
||||
// Optional leading sign.
|
||||
l.accept("+-")
|
||||
// Is it hex?
|
||||
digits := "0123456789"
|
||||
if l.accept("0") && l.accept("xX") {
|
||||
digits = "0123456789abcdefABCDEF"
|
||||
}
|
||||
l.acceptRun(digits)
|
||||
if l.accept(".") {
|
||||
l.acceptRun(digits)
|
||||
}
|
||||
if l.accept("eE") {
|
||||
l.accept("+-")
|
||||
l.acceptRun("0123456789")
|
||||
}
|
||||
// Is it imaginary?
|
||||
l.accept("i")
|
||||
// Next thing mustn't be alphanumeric.
|
||||
if isAlphaNumeric(l.peek()) {
|
||||
l.next()
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// lexQuote scans a quoted string.
|
||||
func lexQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated quoted string")
|
||||
case '"':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemString)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexRawQuote scans a raw quoted string.
|
||||
func lexRawQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated raw quoted string")
|
||||
case '`':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemRawString)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// isSpace reports whether r is a space character.
|
||||
func isSpace(r rune) bool {
|
||||
return r == ' ' || r == '\t'
|
||||
}
|
||||
|
||||
// isEndOfLine reports whether r is an end-of-line character.
|
||||
func isEndOfLine(r rune) bool {
|
||||
return r == '\r' || r == '\n'
|
||||
}
|
||||
|
||||
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
|
||||
func isAlphaNumeric(r rune) bool {
|
||||
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
|
||||
}
|
||||
vendor/github.com/alecthomas/template/parse/node.go (generated, vendored, new file, 834 lines) @@ -0,0 +1,834 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Parse nodes.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var textFormat = "%s" // Changed to "%q" in tests for better error messages.
|
||||
|
||||
// A Node is an element in the parse tree. The interface is trivial.
|
||||
// The interface contains an unexported method so that only
|
||||
// types local to this package can satisfy it.
|
||||
type Node interface {
|
||||
Type() NodeType
|
||||
String() string
|
||||
// Copy does a deep copy of the Node and all its components.
|
||||
// To avoid type assertions, some XxxNodes also have specialized
|
||||
// CopyXxx methods that return *XxxNode.
|
||||
Copy() Node
|
||||
Position() Pos // byte position of start of node in full original input string
|
||||
// tree returns the containing *Tree.
|
||||
// It is unexported so all implementations of Node are in this package.
|
||||
tree() *Tree
|
||||
}
|
||||
|
||||
// NodeType identifies the type of a parse tree node.
|
||||
type NodeType int
|
||||
|
||||
// Pos represents a byte position in the original input text from which
|
||||
// this template was parsed.
|
||||
type Pos int
|
||||
|
||||
func (p Pos) Position() Pos {
|
||||
return p
|
||||
}
|
||||
|
||||
// Type returns itself and provides an easy default implementation
|
||||
// for embedding in a Node. Embedded in all non-trivial Nodes.
|
||||
func (t NodeType) Type() NodeType {
|
||||
return t
|
||||
}
|
||||
|
||||
const (
|
||||
NodeText NodeType = iota // Plain text.
|
||||
NodeAction // A non-control action such as a field evaluation.
|
||||
NodeBool // A boolean constant.
|
||||
NodeChain // A sequence of field accesses.
|
||||
NodeCommand // An element of a pipeline.
|
||||
NodeDot // The cursor, dot.
|
||||
nodeElse // An else action. Not added to tree.
|
||||
nodeEnd // An end action. Not added to tree.
|
||||
NodeField // A field or method name.
|
||||
NodeIdentifier // An identifier; always a function name.
|
||||
NodeIf // An if action.
|
||||
NodeList // A list of Nodes.
|
||||
NodeNil // An untyped nil constant.
|
||||
NodeNumber // A numerical constant.
|
||||
NodePipe // A pipeline of commands.
|
||||
NodeRange // A range action.
|
||||
NodeString // A string constant.
|
||||
NodeTemplate // A template invocation action.
|
||||
NodeVariable // A $ variable.
|
||||
NodeWith // A with action.
|
||||
)
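These node kinds can be inspected directly through the parse API; a minimal sketch using the standard text/template/parse package, which this vendored copy tracks (the template text is invented):

package main

import (
	"fmt"
	"text/template/parse"
)

func main() {
	trees, err := parse.Parse("demo", `hello {{printf "%s" .Name}}`, "{{", "}}",
		map[string]interface{}{"printf": fmt.Sprintf})
	if err != nil {
		panic(err)
	}
	// The root is a ListNode; each element reports its NodeType.
	for _, n := range trees["demo"].Root.Nodes {
		fmt.Printf("%T (kind %d)\n", n, n.Type()) // e.g. *parse.TextNode (kind 0), *parse.ActionNode (kind 1)
	}
}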
|
||||
|
||||
// Nodes.
|
||||
|
||||
// ListNode holds a sequence of nodes.
|
||||
type ListNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Nodes []Node // The element nodes in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newList(pos Pos) *ListNode {
|
||||
return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
|
||||
}
|
||||
|
||||
func (l *ListNode) append(n Node) {
|
||||
l.Nodes = append(l.Nodes, n)
|
||||
}
|
||||
|
||||
func (l *ListNode) tree() *Tree {
|
||||
return l.tr
|
||||
}
|
||||
|
||||
func (l *ListNode) String() string {
|
||||
b := new(bytes.Buffer)
|
||||
for _, n := range l.Nodes {
|
||||
fmt.Fprint(b, n)
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (l *ListNode) CopyList() *ListNode {
|
||||
if l == nil {
|
||||
return l
|
||||
}
|
||||
n := l.tr.newList(l.Pos)
|
||||
for _, elem := range l.Nodes {
|
||||
n.append(elem.Copy())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (l *ListNode) Copy() Node {
|
||||
return l.CopyList()
|
||||
}
|
||||
|
||||
// TextNode holds plain text.
|
||||
type TextNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Text []byte // The text; may span newlines.
|
||||
}
|
||||
|
||||
func (t *Tree) newText(pos Pos, text string) *TextNode {
|
||||
return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
|
||||
}
|
||||
|
||||
func (t *TextNode) String() string {
|
||||
return fmt.Sprintf(textFormat, t.Text)
|
||||
}
|
||||
|
||||
func (t *TextNode) tree() *Tree {
|
||||
return t.tr
|
||||
}
|
||||
|
||||
func (t *TextNode) Copy() Node {
|
||||
return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
|
||||
}
|
||||
|
||||
// PipeNode holds a pipeline with optional declaration
|
||||
type PipeNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Decl []*VariableNode // Variable declarations in lexical order.
|
||||
Cmds []*CommandNode // The commands in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
|
||||
return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
|
||||
}
|
||||
|
||||
func (p *PipeNode) append(command *CommandNode) {
|
||||
p.Cmds = append(p.Cmds, command)
|
||||
}
|
||||
|
||||
func (p *PipeNode) String() string {
|
||||
s := ""
|
||||
if len(p.Decl) > 0 {
|
||||
for i, v := range p.Decl {
|
||||
if i > 0 {
|
||||
s += ", "
|
||||
}
|
||||
s += v.String()
|
||||
}
|
||||
s += " := "
|
||||
}
|
||||
for i, c := range p.Cmds {
|
||||
if i > 0 {
|
||||
s += " | "
|
||||
}
|
||||
s += c.String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *PipeNode) tree() *Tree {
|
||||
return p.tr
|
||||
}
|
||||
|
||||
func (p *PipeNode) CopyPipe() *PipeNode {
|
||||
if p == nil {
|
||||
return p
|
||||
}
|
||||
var decl []*VariableNode
|
||||
for _, d := range p.Decl {
|
||||
decl = append(decl, d.Copy().(*VariableNode))
|
||||
}
|
||||
n := p.tr.newPipeline(p.Pos, p.Line, decl)
|
||||
for _, c := range p.Cmds {
|
||||
n.append(c.Copy().(*CommandNode))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (p *PipeNode) Copy() Node {
|
||||
return p.CopyPipe()
|
||||
}
|
||||
|
||||
// ActionNode holds an action (something bounded by delimiters).
|
||||
// Control actions have their own nodes; ActionNode represents simple
|
||||
// ones such as field evaluations and parenthesized pipelines.
|
||||
type ActionNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Pipe *PipeNode // The pipeline in the action.
|
||||
}
|
||||
|
||||
func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
|
||||
return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
|
||||
}
|
||||
|
||||
func (a *ActionNode) String() string {
|
||||
return fmt.Sprintf("{{%s}}", a.Pipe)
|
||||
|
||||
}
|
||||
|
||||
func (a *ActionNode) tree() *Tree {
|
||||
return a.tr
|
||||
}
|
||||
|
||||
func (a *ActionNode) Copy() Node {
|
||||
return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
|
||||
|
||||
}
|
||||
|
||||
// CommandNode holds a command (a pipeline inside an evaluating action).
|
||||
type CommandNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Args []Node // Arguments in lexical order: Identifier, field, or constant.
|
||||
}
|
||||
|
||||
func (t *Tree) newCommand(pos Pos) *CommandNode {
|
||||
return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
|
||||
}
|
||||
|
||||
func (c *CommandNode) append(arg Node) {
|
||||
c.Args = append(c.Args, arg)
|
||||
}
|
||||
|
||||
func (c *CommandNode) String() string {
|
||||
s := ""
|
||||
for i, arg := range c.Args {
|
||||
if i > 0 {
|
||||
s += " "
|
||||
}
|
||||
if arg, ok := arg.(*PipeNode); ok {
|
||||
s += "(" + arg.String() + ")"
|
||||
continue
|
||||
}
|
||||
s += arg.String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *CommandNode) tree() *Tree {
|
||||
return c.tr
|
||||
}
|
||||
|
||||
func (c *CommandNode) Copy() Node {
|
||||
if c == nil {
|
||||
return c
|
||||
}
|
||||
n := c.tr.newCommand(c.Pos)
|
||||
for _, c := range c.Args {
|
||||
n.append(c.Copy())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// IdentifierNode holds an identifier.
|
||||
type IdentifierNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident string // The identifier's name.
|
||||
}
|
||||
|
||||
// NewIdentifier returns a new IdentifierNode with the given identifier name.
|
||||
func NewIdentifier(ident string) *IdentifierNode {
|
||||
return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
|
||||
}
|
||||
|
||||
// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
|
||||
i.Pos = pos
|
||||
return i
|
||||
}
|
||||
|
||||
// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
|
||||
i.tr = t
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) String() string {
|
||||
return i.Ident
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) tree() *Tree {
|
||||
return i.tr
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) Copy() Node {
|
||||
return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
|
||||
}
|
||||
|
||||
// VariableNode holds a list of variable names, possibly with chained field
|
||||
// accesses. The dollar sign is part of the (first) name.
|
||||
type VariableNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // Variable name and fields in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
|
||||
return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
|
||||
}
|
||||
|
||||
func (v *VariableNode) String() string {
|
||||
s := ""
|
||||
for i, id := range v.Ident {
|
||||
if i > 0 {
|
||||
s += "."
|
||||
}
|
||||
s += id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (v *VariableNode) tree() *Tree {
|
||||
return v.tr
|
||||
}
|
||||
|
||||
func (v *VariableNode) Copy() Node {
|
||||
return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
|
||||
}
|
||||
|
||||
// DotNode holds the special identifier '.'.
|
||||
type DotNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newDot(pos Pos) *DotNode {
|
||||
return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
|
||||
}
|
||||
|
||||
func (d *DotNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeDot
|
||||
}
|
||||
|
||||
func (d *DotNode) String() string {
|
||||
return "."
|
||||
}
|
||||
|
||||
func (d *DotNode) tree() *Tree {
|
||||
return d.tr
|
||||
}
|
||||
|
||||
func (d *DotNode) Copy() Node {
|
||||
return d.tr.newDot(d.Pos)
|
||||
}
|
||||
|
||||
// NilNode holds the special identifier 'nil' representing an untyped nil constant.
|
||||
type NilNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newNil(pos Pos) *NilNode {
|
||||
return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
|
||||
}
|
||||
|
||||
func (n *NilNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeNil
|
||||
}
|
||||
|
||||
func (n *NilNode) String() string {
|
||||
return "nil"
|
||||
}
|
||||
|
||||
func (n *NilNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NilNode) Copy() Node {
|
||||
return n.tr.newNil(n.Pos)
|
||||
}
|
||||
|
||||
// FieldNode holds a field (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The period is dropped from each ident.
|
||||
type FieldNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newField(pos Pos, ident string) *FieldNode {
|
||||
return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
|
||||
}
|
||||
|
||||
func (f *FieldNode) String() string {
|
||||
s := ""
|
||||
for _, id := range f.Ident {
|
||||
s += "." + id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (f *FieldNode) tree() *Tree {
|
||||
return f.tr
|
||||
}
|
||||
|
||||
func (f *FieldNode) Copy() Node {
|
||||
return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
|
||||
}
|
||||
|
||||
// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The periods are dropped from each ident.
|
||||
type ChainNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Node Node
|
||||
Field []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
|
||||
return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
|
||||
}
|
||||
|
||||
// Add adds the named field (which should start with a period) to the end of the chain.
|
||||
func (c *ChainNode) Add(field string) {
|
||||
if len(field) == 0 || field[0] != '.' {
|
||||
panic("no dot in field")
|
||||
}
|
||||
field = field[1:] // Remove leading dot.
|
||||
if field == "" {
|
||||
panic("empty field")
|
||||
}
|
||||
c.Field = append(c.Field, field)
|
||||
}
|
||||
|
||||
func (c *ChainNode) String() string {
|
||||
s := c.Node.String()
|
||||
if _, ok := c.Node.(*PipeNode); ok {
|
||||
s = "(" + s + ")"
|
||||
}
|
||||
for _, field := range c.Field {
|
||||
s += "." + field
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *ChainNode) tree() *Tree {
|
||||
return c.tr
|
||||
}
|
||||
|
||||
func (c *ChainNode) Copy() Node {
|
||||
return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
|
||||
}
|
||||
|
||||
// BoolNode holds a boolean constant.
|
||||
type BoolNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
True bool // The value of the boolean constant.
|
||||
}
|
||||
|
||||
func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
|
||||
return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
|
||||
}
|
||||
|
||||
func (b *BoolNode) String() string {
|
||||
if b.True {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
}
|
||||
|
||||
func (b *BoolNode) tree() *Tree {
|
||||
return b.tr
|
||||
}
|
||||
|
||||
func (b *BoolNode) Copy() Node {
|
||||
return b.tr.newBool(b.Pos, b.True)
|
||||
}
|
||||
|
||||
// NumberNode holds a number: signed or unsigned integer, float, or complex.
|
||||
// The value is parsed and stored under all the types that can represent the value.
|
||||
// This simulates in a small amount of code the behavior of Go's ideal constants.
|
||||
type NumberNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
IsInt bool // Number has an integral value.
|
||||
IsUint bool // Number has an unsigned integral value.
|
||||
IsFloat bool // Number has a floating-point value.
|
||||
IsComplex bool // Number is complex.
|
||||
Int64 int64 // The signed integer value.
|
||||
Uint64 uint64 // The unsigned integer value.
|
||||
Float64 float64 // The floating-point value.
|
||||
Complex128 complex128 // The complex value.
|
||||
Text string // The original textual representation from the input.
|
||||
}
|
||||
|
||||
func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
|
||||
n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
|
||||
switch typ {
|
||||
case itemCharConstant:
|
||||
rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tail != "'" {
|
||||
return nil, fmt.Errorf("malformed character constant: %s", text)
|
||||
}
|
||||
n.Int64 = int64(rune)
|
||||
n.IsInt = true
|
||||
n.Uint64 = uint64(rune)
|
||||
n.IsUint = true
|
||||
n.Float64 = float64(rune) // odd but those are the rules.
|
||||
n.IsFloat = true
|
||||
return n, nil
|
||||
case itemComplex:
|
||||
// fmt.Sscan can parse the pair, so let it do the work.
|
||||
if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n.IsComplex = true
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
// Imaginary constants can only be complex unless they are zero.
|
||||
if len(text) > 0 && text[len(text)-1] == 'i' {
|
||||
f, err := strconv.ParseFloat(text[:len(text)-1], 64)
|
||||
if err == nil {
|
||||
n.IsComplex = true
|
||||
n.Complex128 = complex(0, f)
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
// Do integer test first so we get 0x123 etc.
|
||||
u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
|
||||
if err == nil {
|
||||
n.IsUint = true
|
||||
n.Uint64 = u
|
||||
}
|
||||
i, err := strconv.ParseInt(text, 0, 64)
|
||||
if err == nil {
|
||||
n.IsInt = true
|
||||
n.Int64 = i
|
||||
if i == 0 {
|
||||
n.IsUint = true // in case of -0.
|
||||
n.Uint64 = u
|
||||
}
|
||||
}
|
||||
// If an integer extraction succeeded, promote the float.
|
||||
if n.IsInt {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Int64)
|
||||
} else if n.IsUint {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Uint64)
|
||||
} else {
|
||||
f, err := strconv.ParseFloat(text, 64)
|
||||
if err == nil {
|
||||
n.IsFloat = true
|
||||
n.Float64 = f
|
||||
// If a floating-point extraction succeeded, extract the int if needed.
|
||||
if !n.IsInt && float64(int64(f)) == f {
|
||||
n.IsInt = true
|
||||
n.Int64 = int64(f)
|
||||
}
|
||||
if !n.IsUint && float64(uint64(f)) == f {
|
||||
n.IsUint = true
|
||||
n.Uint64 = uint64(f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !n.IsInt && !n.IsUint && !n.IsFloat {
|
||||
return nil, fmt.Errorf("illegal number syntax: %q", text)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// simplifyComplex pulls out any other types that are represented by the complex number.
|
||||
// These all require that the imaginary part be zero.
|
||||
func (n *NumberNode) simplifyComplex() {
|
||||
n.IsFloat = imag(n.Complex128) == 0
|
||||
if n.IsFloat {
|
||||
n.Float64 = real(n.Complex128)
|
||||
n.IsInt = float64(int64(n.Float64)) == n.Float64
|
||||
if n.IsInt {
|
||||
n.Int64 = int64(n.Float64)
|
||||
}
|
||||
n.IsUint = float64(uint64(n.Float64)) == n.Float64
|
||||
if n.IsUint {
|
||||
n.Uint64 = uint64(n.Float64)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NumberNode) String() string {
|
||||
return n.Text
|
||||
}
|
||||
|
||||
func (n *NumberNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NumberNode) Copy() Node {
|
||||
nn := new(NumberNode)
|
||||
*nn = *n // Easy, fast, correct.
|
||||
return nn
|
||||
}
|
||||
|
||||
// StringNode holds a string constant. The value has been "unquoted".
|
||||
type StringNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Quoted string // The original text of the string, with quotes.
|
||||
Text string // The string, after quote processing.
|
||||
}
|
||||
|
||||
func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
|
||||
return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
|
||||
}
|
||||
|
||||
func (s *StringNode) String() string {
|
||||
return s.Quoted
|
||||
}
|
||||
|
||||
func (s *StringNode) tree() *Tree {
|
||||
return s.tr
|
||||
}
|
||||
|
||||
func (s *StringNode) Copy() Node {
|
||||
return s.tr.newString(s.Pos, s.Quoted, s.Text)
|
||||
}
|
||||
|
||||
// endNode represents an {{end}} action.
|
||||
// It does not appear in the final parse tree.
|
||||
type endNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newEnd(pos Pos) *endNode {
|
||||
return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
|
||||
}
|
||||
|
||||
func (e *endNode) String() string {
|
||||
return "{{end}}"
|
||||
}
|
||||
|
||||
func (e *endNode) tree() *Tree {
|
||||
return e.tr
|
||||
}
|
||||
|
||||
func (e *endNode) Copy() Node {
|
||||
return e.tr.newEnd(e.Pos)
|
||||
}
|
||||
|
||||
// elseNode represents an {{else}} action. Does not appear in the final tree.
|
||||
type elseNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
}
|
||||
|
||||
func (t *Tree) newElse(pos Pos, line int) *elseNode {
|
||||
return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
|
||||
}
|
||||
|
||||
func (e *elseNode) Type() NodeType {
|
||||
return nodeElse
|
||||
}
|
||||
|
||||
func (e *elseNode) String() string {
|
||||
return "{{else}}"
|
||||
}
|
||||
|
||||
func (e *elseNode) tree() *Tree {
|
||||
return e.tr
|
||||
}
|
||||
|
||||
func (e *elseNode) Copy() Node {
|
||||
return e.tr.newElse(e.Pos, e.Line)
|
||||
}
|
||||
|
||||
// BranchNode is the common representation of if, range, and with.
|
||||
type BranchNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Pipe *PipeNode // The pipeline to be evaluated.
|
||||
List *ListNode // What to execute if the value is non-empty.
|
||||
ElseList *ListNode // What to execute if the value is empty (nil if absent).
|
||||
}
|
||||
|
||||
func (b *BranchNode) String() string {
|
||||
name := ""
|
||||
switch b.NodeType {
|
||||
case NodeIf:
|
||||
name = "if"
|
||||
case NodeRange:
|
||||
name = "range"
|
||||
case NodeWith:
|
||||
name = "with"
|
||||
default:
|
||||
panic("unknown branch type")
|
||||
}
|
||||
if b.ElseList != nil {
|
||||
return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
|
||||
}
|
||||
return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
|
||||
}
|
||||
|
||||
func (b *BranchNode) tree() *Tree {
|
||||
return b.tr
|
||||
}
|
||||
|
||||
func (b *BranchNode) Copy() Node {
|
||||
switch b.NodeType {
|
||||
case NodeIf:
|
||||
return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
case NodeRange:
|
||||
return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
case NodeWith:
|
||||
return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
default:
|
||||
panic("unknown branch type")
|
||||
}
|
||||
}
|
||||
|
||||
// IfNode represents an {{if}} action and its commands.
|
||||
type IfNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
|
||||
return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (i *IfNode) Copy() Node {
|
||||
return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// RangeNode represents a {{range}} action and its commands.
|
||||
type RangeNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
|
||||
return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (r *RangeNode) Copy() Node {
|
||||
return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// WithNode represents a {{with}} action and its commands.
|
||||
type WithNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
|
||||
return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (w *WithNode) Copy() Node {
|
||||
return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// TemplateNode represents a {{template}} action.
|
||||
type TemplateNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Name string // The name of the template (unquoted).
|
||||
Pipe *PipeNode // The command to evaluate as dot for the template.
|
||||
}
|
||||
|
||||
func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
|
||||
return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
|
||||
}
|
||||
|
||||
func (t *TemplateNode) String() string {
|
||||
if t.Pipe == nil {
|
||||
return fmt.Sprintf("{{template %q}}", t.Name)
|
||||
}
|
||||
return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
|
||||
}
|
||||
|
||||
func (t *TemplateNode) tree() *Tree {
|
||||
return t.tr
|
||||
}
|
||||
|
||||
func (t *TemplateNode) Copy() Node {
|
||||
return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
|
||||
}
|
||||
700
vendor/github.com/alecthomas/template/parse/parse.go
generated
vendored
Normal file
@ -0,0 +1,700 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package parse builds parse trees for templates as defined by text/template
|
||||
// and html/template. Clients should use those packages to construct templates
|
||||
// rather than this one, which provides shared internal data structures not
|
||||
// intended for general use.
|
||||
package parse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Tree is the representation of a single parsed template.
|
||||
type Tree struct {
|
||||
Name string // name of the template represented by the tree.
|
||||
ParseName string // name of the top-level template during parsing, for error messages.
|
||||
Root *ListNode // top-level root of the tree.
|
||||
text string // text parsed to create the template (or its parent)
|
||||
// Parsing only; cleared after parse.
|
||||
funcs []map[string]interface{}
|
||||
lex *lexer
|
||||
token [3]item // three-token lookahead for parser.
|
||||
peekCount int
|
||||
vars []string // variables defined at the moment.
|
||||
}
|
||||
|
||||
// Copy returns a copy of the Tree. Any parsing state is discarded.
|
||||
func (t *Tree) Copy() *Tree {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return &Tree{
|
||||
Name: t.Name,
|
||||
ParseName: t.ParseName,
|
||||
Root: t.Root.CopyList(),
|
||||
text: t.text,
|
||||
}
|
||||
}
|
||||
|
||||
// Parse returns a map from template name to parse.Tree, created by parsing the
|
||||
// templates described in the argument string. The top-level template will be
|
||||
// given the specified name. If an error is encountered, parsing stops and an
|
||||
// empty map is returned with the error.
|
||||
func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
|
||||
treeSet = make(map[string]*Tree)
|
||||
t := New(name)
|
||||
t.text = text
|
||||
_, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
|
||||
return
|
||||
}
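As an aside (this note and sketch are not part of the vendored file): the exported Parse above returns a map of parse trees keyed by template name. A minimal sketch of calling it directly and walking the resulting nodes could look like this, assuming the package is imported by its upstream path rather than through the vendor directory:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	// Default "{{" and "}}" delimiters; no extra function maps are needed
	// because the template uses only fields, no identifiers.
	trees, err := parse.Parse("greet", "Hello {{.Name}}!{{if .Admin}} (admin){{end}}", "", "")
	if err != nil {
		panic(err)
	}
	// Root is a *ListNode; Nodes holds the top-level text, action and if nodes.
	for _, node := range trees["greet"].Root.Nodes {
		fmt.Printf("%T  %s\n", node, node)
	}
}
```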
|
||||
|
||||
// next returns the next token.
|
||||
func (t *Tree) next() item {
|
||||
if t.peekCount > 0 {
|
||||
t.peekCount--
|
||||
} else {
|
||||
t.token[0] = t.lex.nextItem()
|
||||
}
|
||||
return t.token[t.peekCount]
|
||||
}
|
||||
|
||||
// backup backs the input stream up one token.
|
||||
func (t *Tree) backup() {
|
||||
t.peekCount++
|
||||
}
|
||||
|
||||
// backup2 backs the input stream up two tokens.
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup2(t1 item) {
|
||||
t.token[1] = t1
|
||||
t.peekCount = 2
|
||||
}
|
||||
|
||||
// backup3 backs the input stream up three tokens
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
|
||||
t.token[1] = t1
|
||||
t.token[2] = t2
|
||||
t.peekCount = 3
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next token.
|
||||
func (t *Tree) peek() item {
|
||||
if t.peekCount > 0 {
|
||||
return t.token[t.peekCount-1]
|
||||
}
|
||||
t.peekCount = 1
|
||||
t.token[0] = t.lex.nextItem()
|
||||
return t.token[0]
|
||||
}
|
||||
|
||||
// nextNonSpace returns the next non-space token.
|
||||
func (t *Tree) nextNonSpace() (token item) {
|
||||
for {
|
||||
token = t.next()
|
||||
if token.typ != itemSpace {
|
||||
break
|
||||
}
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// peekNonSpace returns but does not consume the next non-space token.
|
||||
func (t *Tree) peekNonSpace() (token item) {
|
||||
for {
|
||||
token = t.next()
|
||||
if token.typ != itemSpace {
|
||||
break
|
||||
}
|
||||
}
|
||||
t.backup()
|
||||
return token
|
||||
}
|
||||
|
||||
// Parsing.
|
||||
|
||||
// New allocates a new parse tree with the given name.
|
||||
func New(name string, funcs ...map[string]interface{}) *Tree {
|
||||
return &Tree{
|
||||
Name: name,
|
||||
funcs: funcs,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorContext returns a textual representation of the location of the node in the input text.
|
||||
// The receiver is only used when the node does not have a pointer to the tree inside,
|
||||
// which can occur in old code.
|
||||
func (t *Tree) ErrorContext(n Node) (location, context string) {
|
||||
pos := int(n.Position())
|
||||
tree := n.tree()
|
||||
if tree == nil {
|
||||
tree = t
|
||||
}
|
||||
text := tree.text[:pos]
|
||||
byteNum := strings.LastIndex(text, "\n")
|
||||
if byteNum == -1 {
|
||||
byteNum = pos // On first line.
|
||||
} else {
|
||||
byteNum++ // After the newline.
|
||||
byteNum = pos - byteNum
|
||||
}
|
||||
lineNum := 1 + strings.Count(text, "\n")
|
||||
context = n.String()
|
||||
if len(context) > 20 {
|
||||
context = fmt.Sprintf("%.20s...", context)
|
||||
}
|
||||
return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
|
||||
}
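A small illustration of ErrorContext (again not part of the vendored source, upstream import path assumed): given a node from a parsed tree, it reports a name:line:byte location plus a truncated rendering of the node.

```go
package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	trees, err := parse.Parse("t", "line one\n{{.Field}}", "", "")
	if err != nil {
		panic(err)
	}
	tree := trees["t"]
	action := tree.Root.Nodes[1] // the {{.Field}} action on the second line
	loc, ctx := tree.ErrorContext(action)
	fmt.Println(loc, ctx) // prints something like: t:2:2 {{.Field}}
}
```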
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (t *Tree) errorf(format string, args ...interface{}) {
|
||||
t.Root = nil
|
||||
format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// error terminates processing.
|
||||
func (t *Tree) error(err error) {
|
||||
t.errorf("%s", err)
|
||||
}
|
||||
|
||||
// expect consumes the next token and guarantees it has the required type.
|
||||
func (t *Tree) expect(expected itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// expectOneOf consumes the next token and guarantees it has one of the required types.
|
||||
func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected1 && token.typ != expected2 {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// unexpected complains about the token and terminates processing.
|
||||
func (t *Tree) unexpected(token item, context string) {
|
||||
t.errorf("unexpected %s in %s", token, context)
|
||||
}
|
||||
|
||||
// recover is the handler that turns panics into returns from the top level of Parse.
|
||||
func (t *Tree) recover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
if _, ok := e.(runtime.Error); ok {
|
||||
panic(e)
|
||||
}
|
||||
if t != nil {
|
||||
t.stopParse()
|
||||
}
|
||||
*errp = e.(error)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// startParse initializes the parser, using the lexer.
|
||||
func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
|
||||
t.Root = nil
|
||||
t.lex = lex
|
||||
t.vars = []string{"$"}
|
||||
t.funcs = funcs
|
||||
}
|
||||
|
||||
// stopParse terminates parsing.
|
||||
func (t *Tree) stopParse() {
|
||||
t.lex = nil
|
||||
t.vars = nil
|
||||
t.funcs = nil
|
||||
}
|
||||
|
||||
// Parse parses the template definition string to construct a representation of
|
||||
// the template for execution. If either action delimiter string is empty, the
|
||||
// default ("{{" or "}}") is used. Embedded template definitions are added to
|
||||
// the treeSet map.
|
||||
func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
|
||||
defer t.recover(&err)
|
||||
t.ParseName = t.Name
|
||||
t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
|
||||
t.text = text
|
||||
t.parse(treeSet)
|
||||
t.add(treeSet)
|
||||
t.stopParse()
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// add adds tree to the treeSet.
|
||||
func (t *Tree) add(treeSet map[string]*Tree) {
|
||||
tree := treeSet[t.Name]
|
||||
if tree == nil || IsEmptyTree(tree.Root) {
|
||||
treeSet[t.Name] = t
|
||||
return
|
||||
}
|
||||
if !IsEmptyTree(t.Root) {
|
||||
t.errorf("template: multiple definition of template %q", t.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// IsEmptyTree reports whether this tree (node) is empty of everything but space.
|
||||
func IsEmptyTree(n Node) bool {
|
||||
switch n := n.(type) {
|
||||
case nil:
|
||||
return true
|
||||
case *ActionNode:
|
||||
case *IfNode:
|
||||
case *ListNode:
|
||||
for _, node := range n.Nodes {
|
||||
if !IsEmptyTree(node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case *RangeNode:
|
||||
case *TemplateNode:
|
||||
case *TextNode:
|
||||
return len(bytes.TrimSpace(n.Text)) == 0
|
||||
case *WithNode:
|
||||
default:
|
||||
panic("unknown node: " + n.String())
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parse is the top-level parser for a template, essentially the same
|
||||
// as itemList except it also parses {{define}} actions.
|
||||
// It runs to EOF.
|
||||
func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
|
||||
t.Root = t.newList(t.peek().pos)
|
||||
for t.peek().typ != itemEOF {
|
||||
if t.peek().typ == itemLeftDelim {
|
||||
delim := t.next()
|
||||
if t.nextNonSpace().typ == itemDefine {
|
||||
newT := New("definition") // name will be updated once we know it.
|
||||
newT.text = t.text
|
||||
newT.ParseName = t.ParseName
|
||||
newT.startParse(t.funcs, t.lex)
|
||||
newT.parseDefinition(treeSet)
|
||||
continue
|
||||
}
|
||||
t.backup2(delim)
|
||||
}
|
||||
n := t.textOrAction()
|
||||
if n.Type() == nodeEnd {
|
||||
t.errorf("unexpected %s", n)
|
||||
}
|
||||
t.Root.append(n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseDefinition parses a {{define}} ... {{end}} template definition and
|
||||
// installs the definition in the treeSet map. The "define" keyword has already
|
||||
// been scanned.
|
||||
func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
|
||||
const context = "define clause"
|
||||
name := t.expectOneOf(itemString, itemRawString, context)
|
||||
var err error
|
||||
t.Name, err = strconv.Unquote(name.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
t.expect(itemRightDelim, context)
|
||||
var end Node
|
||||
t.Root, end = t.itemList()
|
||||
if end.Type() != nodeEnd {
|
||||
t.errorf("unexpected %s in %s", end, context)
|
||||
}
|
||||
t.add(treeSet)
|
||||
t.stopParse()
|
||||
}
|
||||
|
||||
// itemList:
|
||||
// textOrAction*
|
||||
// Terminates at {{end}} or {{else}}, returned separately.
|
||||
func (t *Tree) itemList() (list *ListNode, next Node) {
|
||||
list = t.newList(t.peekNonSpace().pos)
|
||||
for t.peekNonSpace().typ != itemEOF {
|
||||
n := t.textOrAction()
|
||||
switch n.Type() {
|
||||
case nodeEnd, nodeElse:
|
||||
return list, n
|
||||
}
|
||||
list.append(n)
|
||||
}
|
||||
t.errorf("unexpected EOF")
|
||||
return
|
||||
}
|
||||
|
||||
// textOrAction:
|
||||
// text | action
|
||||
func (t *Tree) textOrAction() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemElideNewline:
|
||||
return t.elideNewline()
|
||||
case itemText:
|
||||
return t.newText(token.pos, token.val)
|
||||
case itemLeftDelim:
|
||||
return t.action()
|
||||
default:
|
||||
t.unexpected(token, "input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// elideNewline:
|
||||
// Remove newlines trailing rightDelim if \\ is present.
|
||||
func (t *Tree) elideNewline() Node {
|
||||
token := t.peek()
|
||||
if token.typ != itemText {
|
||||
t.unexpected(token, "input")
|
||||
return nil
|
||||
}
|
||||
|
||||
t.next()
|
||||
stripped := strings.TrimLeft(token.val, "\n\r")
|
||||
diff := len(token.val) - len(stripped)
|
||||
if diff > 0 {
|
||||
// This is a bit nasty. We mutate the token in-place to remove
|
||||
// preceding newlines.
|
||||
token.pos += Pos(diff)
|
||||
token.val = stripped
|
||||
}
|
||||
return t.newText(token.pos, token.val)
|
||||
}
|
||||
|
||||
// Action:
|
||||
// control
|
||||
// command ("|" command)*
|
||||
// Left delim is past. Now get actions.
|
||||
// First word could be a keyword such as range.
|
||||
func (t *Tree) action() (n Node) {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemElse:
|
||||
return t.elseControl()
|
||||
case itemEnd:
|
||||
return t.endControl()
|
||||
case itemIf:
|
||||
return t.ifControl()
|
||||
case itemRange:
|
||||
return t.rangeControl()
|
||||
case itemTemplate:
|
||||
return t.templateControl()
|
||||
case itemWith:
|
||||
return t.withControl()
|
||||
}
|
||||
t.backup()
|
||||
// Do not pop variables; they persist until "end".
|
||||
return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
|
||||
}
|
||||
|
||||
// Pipeline:
|
||||
// declarations? command ('|' command)*
|
||||
func (t *Tree) pipeline(context string) (pipe *PipeNode) {
|
||||
var decl []*VariableNode
|
||||
pos := t.peekNonSpace().pos
|
||||
// Are there declarations?
|
||||
for {
|
||||
if v := t.peekNonSpace(); v.typ == itemVariable {
|
||||
t.next()
|
||||
// Since space is a token, we need 3-token look-ahead here in the worst case:
|
||||
// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
|
||||
// argument variable rather than a declaration. So remember the token
|
||||
// adjacent to the variable so we can push it back if necessary.
|
||||
tokenAfterVariable := t.peek()
|
||||
if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
|
||||
t.nextNonSpace()
|
||||
variable := t.newVariable(v.pos, v.val)
|
||||
decl = append(decl, variable)
|
||||
t.vars = append(t.vars, v.val)
|
||||
if next.typ == itemChar && next.val == "," {
|
||||
if context == "range" && len(decl) < 2 {
|
||||
continue
|
||||
}
|
||||
t.errorf("too many declarations in %s", context)
|
||||
}
|
||||
} else if tokenAfterVariable.typ == itemSpace {
|
||||
t.backup3(v, tokenAfterVariable)
|
||||
} else {
|
||||
t.backup2(v)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
|
||||
for {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemRightDelim, itemRightParen:
|
||||
if len(pipe.Cmds) == 0 {
|
||||
t.errorf("missing value for %s", context)
|
||||
}
|
||||
if token.typ == itemRightParen {
|
||||
t.backup()
|
||||
}
|
||||
return
|
||||
case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
|
||||
itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
|
||||
t.backup()
|
||||
pipe.append(t.command())
|
||||
default:
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
|
||||
defer t.popVars(len(t.vars))
|
||||
line = t.lex.lineNumber()
|
||||
pipe = t.pipeline(context)
|
||||
var next Node
|
||||
list, next = t.itemList()
|
||||
switch next.Type() {
|
||||
case nodeEnd: //done
|
||||
case nodeElse:
|
||||
if allowElseIf {
|
||||
// Special case for "else if". If the "else" is followed immediately by an "if",
|
||||
// the elseControl will have left the "if" token pending. Treat
|
||||
// {{if a}}_{{else if b}}_{{end}}
|
||||
// as
|
||||
// {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
|
||||
// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
|
||||
// is assumed. This technique works even for long if-else-if chains.
|
||||
// TODO: Should we allow else-if in with and range?
|
||||
if t.peek().typ == itemIf {
|
||||
t.next() // Consume the "if" token.
|
||||
elseList = t.newList(next.Position())
|
||||
elseList.append(t.ifControl())
|
||||
// Do not consume the next item - only one {{end}} required.
|
||||
break
|
||||
}
|
||||
}
|
||||
elseList, next = t.itemList()
|
||||
if next.Type() != nodeEnd {
|
||||
t.errorf("expected end; found %s", next)
|
||||
}
|
||||
}
|
||||
return pipe.Position(), line, pipe, list, elseList
|
||||
}
|
||||
|
||||
// If:
|
||||
// {{if pipeline}} itemList {{end}}
|
||||
// {{if pipeline}} itemList {{else}} itemList {{end}}
|
||||
// If keyword is past.
|
||||
func (t *Tree) ifControl() Node {
|
||||
return t.newIf(t.parseControl(true, "if"))
|
||||
}
|
||||
|
||||
// Range:
|
||||
// {{range pipeline}} itemList {{end}}
|
||||
// {{range pipeline}} itemList {{else}} itemList {{end}}
|
||||
// Range keyword is past.
|
||||
func (t *Tree) rangeControl() Node {
|
||||
return t.newRange(t.parseControl(false, "range"))
|
||||
}
|
||||
|
||||
// With:
|
||||
// {{with pipeline}} itemList {{end}}
|
||||
// {{with pipeline}} itemList {{else}} itemList {{end}}
|
||||
// If keyword is past.
|
||||
func (t *Tree) withControl() Node {
|
||||
return t.newWith(t.parseControl(false, "with"))
|
||||
}
|
||||
|
||||
// End:
|
||||
// {{end}}
|
||||
// End keyword is past.
|
||||
func (t *Tree) endControl() Node {
|
||||
return t.newEnd(t.expect(itemRightDelim, "end").pos)
|
||||
}
|
||||
|
||||
// Else:
|
||||
// {{else}}
|
||||
// Else keyword is past.
|
||||
func (t *Tree) elseControl() Node {
|
||||
// Special case for "else if".
|
||||
peek := t.peekNonSpace()
|
||||
if peek.typ == itemIf {
|
||||
// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
|
||||
return t.newElse(peek.pos, t.lex.lineNumber())
|
||||
}
|
||||
return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
|
||||
}
|
||||
|
||||
// Template:
|
||||
// {{template stringValue pipeline}}
|
||||
// Template keyword is past. The name must be something that can evaluate
|
||||
// to a string.
|
||||
func (t *Tree) templateControl() Node {
|
||||
var name string
|
||||
token := t.nextNonSpace()
|
||||
switch token.typ {
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
name = s
|
||||
default:
|
||||
t.unexpected(token, "template invocation")
|
||||
}
|
||||
var pipe *PipeNode
|
||||
if t.nextNonSpace().typ != itemRightDelim {
|
||||
t.backup()
|
||||
// Do not pop variables; they persist until "end".
|
||||
pipe = t.pipeline("template")
|
||||
}
|
||||
return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
|
||||
}
|
||||
|
||||
// command:
|
||||
// operand (space operand)*
|
||||
// space-separated arguments up to a pipeline character or right delimiter.
|
||||
// we consume the pipe character but leave the right delim to terminate the action.
|
||||
func (t *Tree) command() *CommandNode {
|
||||
cmd := t.newCommand(t.peekNonSpace().pos)
|
||||
for {
|
||||
t.peekNonSpace() // skip leading spaces.
|
||||
operand := t.operand()
|
||||
if operand != nil {
|
||||
cmd.append(operand)
|
||||
}
|
||||
switch token := t.next(); token.typ {
|
||||
case itemSpace:
|
||||
continue
|
||||
case itemError:
|
||||
t.errorf("%s", token.val)
|
||||
case itemRightDelim, itemRightParen:
|
||||
t.backup()
|
||||
case itemPipe:
|
||||
default:
|
||||
t.errorf("unexpected %s in operand; missing space?", token)
|
||||
}
|
||||
break
|
||||
}
|
||||
if len(cmd.Args) == 0 {
|
||||
t.errorf("empty command")
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
// operand:
|
||||
// term .Field*
|
||||
// An operand is a space-separated component of a command,
|
||||
// a term possibly followed by field accesses.
|
||||
// A nil return means the next item is not an operand.
|
||||
func (t *Tree) operand() Node {
|
||||
node := t.term()
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
if t.peek().typ == itemField {
|
||||
chain := t.newChain(t.peek().pos, node)
|
||||
for t.peek().typ == itemField {
|
||||
chain.Add(t.next().val)
|
||||
}
|
||||
// Compatibility with original API: If the term is of type NodeField
|
||||
// or NodeVariable, just put more fields on the original.
|
||||
// Otherwise, keep the Chain node.
|
||||
// TODO: Switch to Chains always when we can.
|
||||
switch node.Type() {
|
||||
case NodeField:
|
||||
node = t.newField(chain.Position(), chain.String())
|
||||
case NodeVariable:
|
||||
node = t.newVariable(chain.Position(), chain.String())
|
||||
default:
|
||||
node = chain
|
||||
}
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// term:
|
||||
// literal (number, string, nil, boolean)
|
||||
// function (identifier)
|
||||
// .
|
||||
// .Field
|
||||
// $
|
||||
// '(' pipeline ')'
|
||||
// A term is a simple "expression".
|
||||
// A nil return means the next item is not a term.
|
||||
func (t *Tree) term() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemError:
|
||||
t.errorf("%s", token.val)
|
||||
case itemIdentifier:
|
||||
if !t.hasFunction(token.val) {
|
||||
t.errorf("function %q not defined", token.val)
|
||||
}
|
||||
return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
|
||||
case itemDot:
|
||||
return t.newDot(token.pos)
|
||||
case itemNil:
|
||||
return t.newNil(token.pos)
|
||||
case itemVariable:
|
||||
return t.useVar(token.pos, token.val)
|
||||
case itemField:
|
||||
return t.newField(token.pos, token.val)
|
||||
case itemBool:
|
||||
return t.newBool(token.pos, token.val == "true")
|
||||
case itemCharConstant, itemComplex, itemNumber:
|
||||
number, err := t.newNumber(token.pos, token.val, token.typ)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return number
|
||||
case itemLeftParen:
|
||||
pipe := t.pipeline("parenthesized pipeline")
|
||||
if token := t.next(); token.typ != itemRightParen {
|
||||
t.errorf("unclosed right paren: unexpected %s", token)
|
||||
}
|
||||
return pipe
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return t.newString(token.pos, token.val, s)
|
||||
}
|
||||
t.backup()
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasFunction reports if a function name exists in the Tree's maps.
|
||||
func (t *Tree) hasFunction(name string) bool {
|
||||
for _, funcMap := range t.funcs {
|
||||
if funcMap == nil {
|
||||
continue
|
||||
}
|
||||
if funcMap[name] != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// popVars trims the variable list to the specified length
|
||||
func (t *Tree) popVars(n int) {
|
||||
t.vars = t.vars[:n]
|
||||
}
|
||||
|
||||
// useVar returns a node for a variable reference. It errors if the
|
||||
// variable is not defined.
|
||||
func (t *Tree) useVar(pos Pos, name string) Node {
|
||||
v := t.newVariable(pos, name)
|
||||
for _, varName := range t.vars {
|
||||
if varName == v.Ident[0] {
|
||||
return v
|
||||
}
|
||||
}
|
||||
t.errorf("undefined variable %q", v.Ident[0])
|
||||
return nil
|
||||
}
|
||||
218
vendor/github.com/alecthomas/template/template.go
generated
vendored
Normal file
@ -0,0 +1,218 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// common holds the information shared by related templates.
|
||||
type common struct {
|
||||
tmpl map[string]*Template
|
||||
// We use two maps, one for parsing and one for execution.
|
||||
// This separation makes the API cleaner since it doesn't
|
||||
// expose reflection to the client.
|
||||
parseFuncs FuncMap
|
||||
execFuncs map[string]reflect.Value
|
||||
}
|
||||
|
||||
// Template is the representation of a parsed template. The *parse.Tree
|
||||
// field is exported only for use by html/template and should be treated
|
||||
// as unexported by all other clients.
|
||||
type Template struct {
|
||||
name string
|
||||
*parse.Tree
|
||||
*common
|
||||
leftDelim string
|
||||
rightDelim string
|
||||
}
|
||||
|
||||
// New allocates a new template with the given name.
|
||||
func New(name string) *Template {
|
||||
return &Template{
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the template.
|
||||
func (t *Template) Name() string {
|
||||
return t.name
|
||||
}
|
||||
|
||||
// New allocates a new template associated with the given one and with the same
|
||||
// delimiters. The association, which is transitive, allows one template to
|
||||
// invoke another with a {{template}} action.
|
||||
func (t *Template) New(name string) *Template {
|
||||
t.init()
|
||||
return &Template{
|
||||
name: name,
|
||||
common: t.common,
|
||||
leftDelim: t.leftDelim,
|
||||
rightDelim: t.rightDelim,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Template) init() {
|
||||
if t.common == nil {
|
||||
t.common = new(common)
|
||||
t.tmpl = make(map[string]*Template)
|
||||
t.parseFuncs = make(FuncMap)
|
||||
t.execFuncs = make(map[string]reflect.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the template, including all associated
|
||||
// templates. The actual representation is not copied, but the name space of
|
||||
// associated templates is, so further calls to Parse in the copy will add
|
||||
// templates to the copy but not to the original. Clone can be used to prepare
|
||||
// common templates and use them with variant definitions for other templates
|
||||
// by adding the variants after the clone is made.
|
||||
func (t *Template) Clone() (*Template, error) {
|
||||
nt := t.copy(nil)
|
||||
nt.init()
|
||||
nt.tmpl[t.name] = nt
|
||||
for k, v := range t.tmpl {
|
||||
if k == t.name { // Already installed.
|
||||
continue
|
||||
}
|
||||
// The associated templates share nt's common structure.
|
||||
tmpl := v.copy(nt.common)
|
||||
nt.tmpl[k] = tmpl
|
||||
}
|
||||
for k, v := range t.parseFuncs {
|
||||
nt.parseFuncs[k] = v
|
||||
}
|
||||
for k, v := range t.execFuncs {
|
||||
nt.execFuncs[k] = v
|
||||
}
|
||||
return nt, nil
|
||||
}
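To make the name-space copying concrete, here is a minimal sketch (not part of the vendored file, upstream import path assumed): templates defined on a clone are visible through the clone's Lookup but never through the original's.

```go
package main

import (
	"fmt"

	"github.com/alecthomas/template"
)

func main() {
	// A base set that refers to a "body" template it does not define yet.
	base, err := template.New("page").Parse(`Hello {{template "body" .}}`)
	if err != nil {
		panic(err)
	}
	variant, err := base.Clone()
	if err != nil {
		panic(err)
	}
	// Definitions added to the clone do not leak back into the original.
	if _, err := variant.Parse(`{{define "body"}}from the variant{{end}}`); err != nil {
		panic(err)
	}
	fmt.Println(variant.Lookup("body") != nil) // true
	fmt.Println(base.Lookup("body") != nil)    // false
}
```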
|
||||
|
||||
// copy returns a shallow copy of t, with common set to the argument.
|
||||
func (t *Template) copy(c *common) *Template {
|
||||
nt := New(t.name)
|
||||
nt.Tree = t.Tree
|
||||
nt.common = c
|
||||
nt.leftDelim = t.leftDelim
|
||||
nt.rightDelim = t.rightDelim
|
||||
return nt
|
||||
}
|
||||
|
||||
// AddParseTree creates a new template with the name and parse tree
|
||||
// and associates it with t.
|
||||
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
|
||||
if t.common != nil && t.tmpl[name] != nil {
|
||||
return nil, fmt.Errorf("template: redefinition of template %q", name)
|
||||
}
|
||||
nt := t.New(name)
|
||||
nt.Tree = tree
|
||||
t.tmpl[name] = nt
|
||||
return nt, nil
|
||||
}
|
||||
|
||||
// Templates returns a slice of the templates associated with t, including t
|
||||
// itself.
|
||||
func (t *Template) Templates() []*Template {
|
||||
if t.common == nil {
|
||||
return nil
|
||||
}
|
||||
// Return a slice so we don't expose the map.
|
||||
m := make([]*Template, 0, len(t.tmpl))
|
||||
for _, v := range t.tmpl {
|
||||
m = append(m, v)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Delims sets the action delimiters to the specified strings, to be used in
|
||||
// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
|
||||
// definitions will inherit the settings. An empty delimiter stands for the
|
||||
// corresponding default: {{ or }}.
|
||||
// The return value is the template, so calls can be chained.
|
||||
func (t *Template) Delims(left, right string) *Template {
|
||||
t.leftDelim = left
|
||||
t.rightDelim = right
|
||||
return t
|
||||
}
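Since both Delims and Funcs return the receiver, they are normally chained before Parse. A hedged sketch (not part of the vendored file; FuncMap is declared in funcs.go, outside this hunk, and the upstream import path is assumed):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/alecthomas/template"
)

func main() {
	// Use [[ ]] as delimiters so literal {{ }} can pass through as text,
	// and register a helper function before parsing.
	t := template.New("cfg").
		Delims("[[", "]]").
		Funcs(template.FuncMap{"upper": strings.ToUpper})

	if _, err := t.Parse("name: [[upper .Name]], raw: {{left alone}}"); err != nil {
		panic(err)
	}
	fmt.Println(t.Lookup("cfg") != nil) // true
}
```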
|
||||
|
||||
// Funcs adds the elements of the argument map to the template's function map.
|
||||
// It panics if a value in the map is not a function with appropriate return
|
||||
// type. However, it is legal to overwrite elements of the map. The return
|
||||
// value is the template, so calls can be chained.
|
||||
func (t *Template) Funcs(funcMap FuncMap) *Template {
|
||||
t.init()
|
||||
addValueFuncs(t.execFuncs, funcMap)
|
||||
addFuncs(t.parseFuncs, funcMap)
|
||||
return t
|
||||
}
|
||||
|
||||
// Lookup returns the template with the given name that is associated with t,
|
||||
// or nil if there is no such template.
|
||||
func (t *Template) Lookup(name string) *Template {
|
||||
if t.common == nil {
|
||||
return nil
|
||||
}
|
||||
return t.tmpl[name]
|
||||
}
|
||||
|
||||
// Parse parses a string into a template. Nested template definitions will be
|
||||
// associated with the top-level template t. Parse may be called multiple times
|
||||
// to parse definitions of templates to associate with t. It is an error if a
|
||||
// resulting template is non-empty (contains content other than template
|
||||
// definitions) and would replace a non-empty template with the same name.
|
||||
// (In multiple calls to Parse with the same receiver template, only one call
|
||||
// can contain text other than space, comments, and template definitions.)
|
||||
func (t *Template) Parse(text string) (*Template, error) {
|
||||
t.init()
|
||||
trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Add the newly parsed trees, including the one for t, into our common structure.
|
||||
for name, tree := range trees {
|
||||
// If the name we parsed is the name of this template, overwrite this template.
|
||||
// The associate method checks it's not a redefinition.
|
||||
tmpl := t
|
||||
if name != t.name {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
// Even if t == tmpl, we need to install it in the common.tmpl map.
|
||||
if replace, err := t.associate(tmpl, tree); err != nil {
|
||||
return nil, err
|
||||
} else if replace {
|
||||
tmpl.Tree = tree
|
||||
}
|
||||
tmpl.leftDelim = t.leftDelim
|
||||
tmpl.rightDelim = t.rightDelim
|
||||
}
|
||||
return t, nil
|
||||
}
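A short sketch of the behaviour described above (not part of the vendored file): one Parse call can carry both body text and {{define}} blocks, and the defined templates become associated with the top-level one. Execute lives in exec.go, outside this hunk, so its use here assumes the fork keeps text/template's API.

```go
package main

import (
	"os"

	"github.com/alecthomas/template"
)

func main() {
	const src = `{{define "item"}}- {{.}}
{{end}}{{range .}}{{template "item" .}}{{end}}`

	t, err := template.New("list").Parse(src)
	if err != nil {
		panic(err)
	}
	if err := t.Execute(os.Stdout, []string{"alpha", "beta"}); err != nil {
		panic(err)
	}
	// Output:
	// - alpha
	// - beta
}
```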
|
||||
|
||||
// associate installs the new template into the group of templates associated
|
||||
// with t. It is an error to reuse a name except to overwrite an empty
|
||||
// template. The two are already known to share the common structure.
|
||||
// The boolean return value reports whether to store this tree as t.Tree.
|
||||
func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
|
||||
if new.common != t.common {
|
||||
panic("internal error: associate not common")
|
||||
}
|
||||
name := new.name
|
||||
if old := t.tmpl[name]; old != nil {
|
||||
oldIsEmpty := parse.IsEmptyTree(old.Root)
|
||||
newIsEmpty := parse.IsEmptyTree(tree.Root)
|
||||
if newIsEmpty {
|
||||
// Whether old is empty or not, new is empty; no reason to replace old.
|
||||
return false, nil
|
||||
}
|
||||
if !oldIsEmpty {
|
||||
return false, fmt.Errorf("template: redefinition of template %q", name)
|
||||
}
|
||||
}
|
||||
t.tmpl[name] = new
|
||||
return true, nil
|
||||
}
|
||||
19
vendor/github.com/alecthomas/units/COPYING
generated
vendored
Normal file
@ -0,0 +1,19 @@
Copyright (C) 2014 Alec Thomas

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
11
vendor/github.com/alecthomas/units/README.md
generated
vendored
Normal file
@ -0,0 +1,11 @@
# Units - Helpful unit multipliers and functions for Go

The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package.

It allows for code like this:

```go
n, err := ParseBase2Bytes("1KB")
// n == 1024
n = units.Mebibyte * 512
```
83
vendor/github.com/alecthomas/units/bytes.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
package units
|
||||
|
||||
// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
|
||||
// etc.).
|
||||
type Base2Bytes int64
|
||||
|
||||
// Base-2 byte units.
|
||||
const (
|
||||
Kibibyte Base2Bytes = 1024
|
||||
KiB = Kibibyte
|
||||
Mebibyte = Kibibyte * 1024
|
||||
MiB = Mebibyte
|
||||
Gibibyte = Mebibyte * 1024
|
||||
GiB = Gibibyte
|
||||
Tebibyte = Gibibyte * 1024
|
||||
TiB = Tebibyte
|
||||
Pebibyte = Tebibyte * 1024
|
||||
PiB = Pebibyte
|
||||
Exbibyte = Pebibyte * 1024
|
||||
EiB = Exbibyte
|
||||
)
|
||||
|
||||
var (
|
||||
bytesUnitMap = MakeUnitMap("iB", "B", 1024)
|
||||
oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
|
||||
)
|
||||
|
||||
// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
|
||||
// and KiB are both 1024.
|
||||
func ParseBase2Bytes(s string) (Base2Bytes, error) {
|
||||
n, err := ParseUnit(s, bytesUnitMap)
|
||||
if err != nil {
|
||||
n, err = ParseUnit(s, oldBytesUnitMap)
|
||||
}
|
||||
return Base2Bytes(n), err
|
||||
}
|
||||
|
||||
func (b Base2Bytes) String() string {
|
||||
return ToString(int64(b), 1024, "iB", "B")
|
||||
}
|
||||
|
||||
var (
|
||||
metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
|
||||
)
|
||||
|
||||
// MetricBytes are SI byte units (1000 bytes in a kilobyte).
|
||||
type MetricBytes SI
|
||||
|
||||
// SI base-10 byte units.
|
||||
const (
|
||||
Kilobyte MetricBytes = 1000
|
||||
KB = Kilobyte
|
||||
Megabyte = Kilobyte * 1000
|
||||
MB = Megabyte
|
||||
Gigabyte = Megabyte * 1000
|
||||
GB = Gigabyte
|
||||
Terabyte = Gigabyte * 1000
|
||||
TB = Terabyte
|
||||
Petabyte = Terabyte * 1000
|
||||
PB = Petabyte
|
||||
Exabyte = Petabyte * 1000
|
||||
EB = Exabyte
|
||||
)
|
||||
|
||||
// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
|
||||
func ParseMetricBytes(s string) (MetricBytes, error) {
|
||||
n, err := ParseUnit(s, metricBytesUnitMap)
|
||||
return MetricBytes(n), err
|
||||
}
|
||||
|
||||
func (m MetricBytes) String() string {
|
||||
return ToString(int64(m), 1000, "B", "B")
|
||||
}
|
||||
|
||||
// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
|
||||
// respectively. That is, KiB represents 1024 and KB represents 1000.
|
||||
func ParseStrictBytes(s string) (int64, error) {
|
||||
n, err := ParseUnit(s, bytesUnitMap)
|
||||
if err != nil {
|
||||
n, err = ParseUnit(s, metricBytesUnitMap)
|
||||
}
|
||||
return int64(n), err
|
||||
}
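Putting the three parsers above side by side (an illustrative sketch, not part of the vendored file; the upstream import path is assumed):

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// Base-2 parsing accepts both suffix styles: "1KB" and "1KiB" are 1024.
	b, err := units.ParseBase2Bytes("1KB")
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(b)) // 1024

	// Strict parsing keeps the scales apart: KiB is 1024, KB is 1000.
	kib, _ := units.ParseStrictBytes("1KiB")
	kb, _ := units.ParseStrictBytes("1KB")
	fmt.Println(kib, kb) // 1024 1000

	// String() emits the largest exact multiples, e.g. 1536 bytes.
	fmt.Println(units.Base2Bytes(1536).String()) // 1KiB512B
}
```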
|
||||
13
vendor/github.com/alecthomas/units/doc.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
// Package units provides helpful unit multipliers and functions for Go.
//
// The goal of this package is to have functionality similar to the time [1] package.
//
//
// [1] http://golang.org/pkg/time/
//
// It allows for code like this:
//
//	n, err := ParseBase2Bytes("1KB")
//	// n == 1024
//	n = units.Mebibyte * 512
package units
26
vendor/github.com/alecthomas/units/si.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
package units

// SI units.
type SI int64

// SI unit multiples.
const (
	Kilo SI = 1000
	Mega    = Kilo * 1000
	Giga    = Mega * 1000
	Tera    = Giga * 1000
	Peta    = Tera * 1000
	Exa     = Peta * 1000
)

func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
	return map[string]float64{
		shortSuffix:  1,
		"K" + suffix: float64(scale),
		"M" + suffix: float64(scale * scale),
		"G" + suffix: float64(scale * scale * scale),
		"T" + suffix: float64(scale * scale * scale * scale),
		"P" + suffix: float64(scale * scale * scale * scale * scale),
		"E" + suffix: float64(scale * scale * scale * scale * scale * scale),
	}
}
138
vendor/github.com/alecthomas/units/util.go
generated
vendored
Normal file
@ -0,0 +1,138 @@
|
||||
package units
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
|
||||
)
|
||||
|
||||
func ToString(n int64, scale int64, suffix, baseSuffix string) string {
|
||||
mn := len(siUnits)
|
||||
out := make([]string, mn)
|
||||
for i, m := range siUnits {
|
||||
if n%scale != 0 || i == 0 && n == 0 {
|
||||
s := suffix
|
||||
if i == 0 {
|
||||
s = baseSuffix
|
||||
}
|
||||
out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
|
||||
}
|
||||
n /= scale
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return strings.Join(out, "")
|
||||
}
|
||||
|
||||
// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
|
||||
var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
|
||||
|
||||
// leadingInt consumes the leading [0-9]* from s.
|
||||
func leadingInt(s string) (x int64, rem string, err error) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c < '0' || c > '9' {
|
||||
break
|
||||
}
|
||||
if x >= (1<<63-10)/10 {
|
||||
// overflow
|
||||
return 0, "", errLeadingInt
|
||||
}
|
||||
x = x*10 + int64(c) - '0'
|
||||
}
|
||||
return x, s[i:], nil
|
||||
}
|
||||
|
||||
func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
|
||||
// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
|
||||
orig := s
|
||||
f := float64(0)
|
||||
neg := false
|
||||
|
||||
// Consume [-+]?
|
||||
if s != "" {
|
||||
c := s[0]
|
||||
if c == '-' || c == '+' {
|
||||
neg = c == '-'
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
// Special case: if all that is left is "0", this is zero.
|
||||
if s == "0" {
|
||||
return 0, nil
|
||||
}
|
||||
if s == "" {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
for s != "" {
|
||||
g := float64(0) // this element of the sequence
|
||||
|
||||
var x int64
|
||||
var err error
|
||||
|
||||
// The next character must be [0-9.]
|
||||
if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
// Consume [0-9]*
|
||||
pl := len(s)
|
||||
x, s, err = leadingInt(s)
|
||||
if err != nil {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
g = float64(x)
|
||||
pre := pl != len(s) // whether we consumed anything before a period
|
||||
|
||||
// Consume (\.[0-9]*)?
|
||||
post := false
|
||||
if s != "" && s[0] == '.' {
|
||||
s = s[1:]
|
||||
pl := len(s)
|
||||
x, s, err = leadingInt(s)
|
||||
if err != nil {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
scale := 1.0
|
||||
for n := pl - len(s); n > 0; n-- {
|
||||
scale *= 10
|
||||
}
|
||||
g += float64(x) / scale
|
||||
post = pl != len(s)
|
||||
}
|
||||
if !pre && !post {
|
||||
// no digits (e.g. ".s" or "-.s")
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
|
||||
// Consume unit.
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c == '.' || ('0' <= c && c <= '9') {
|
||||
break
|
||||
}
|
||||
}
|
||||
u := s[:i]
|
||||
s = s[i:]
|
||||
unit, ok := unitMap[u]
|
||||
if !ok {
|
||||
return 0, errors.New("units: unknown unit " + u + " in " + orig)
|
||||
}
|
||||
|
||||
f += g * unit
|
||||
}
|
||||
|
||||
if neg {
|
||||
f = -f
|
||||
}
|
||||
if f < float64(-1<<63) || f > float64(1<<63-1) {
|
||||
return 0, errors.New("units: overflow parsing unit")
|
||||
}
|
||||
return int64(f), nil
|
||||
}
|
||||
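ParseUnit and ToString above are the generic parse/format pair for a suffix map such as the one MakeUnitMap in si.go produces. A minimal round-trip sketch, again assuming an illustrative decimal "B" scheme:

package main

import (
    "fmt"

    "github.com/alecthomas/units"
)

func main() {
    m := units.MakeUnitMap("B", "B", 1000)

    n, err := units.ParseUnit("1.5MB", m) // 1.5 * 1000 * 1000
    if err != nil {
        panic(err)
    }
    fmt.Println(n) // 1500000

    // ToString decomposes the value into SI multiples, largest first.
    fmt.Println(units.ToString(n, 1000, "B", "B")) // 1MB500KB
}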
15
vendor/github.com/golang/snappy/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,15 @@
# This is the official list of Snappy-Go authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.

# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.

# Please keep the list sorted.

Damian Gryski <dgryski@gmail.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>
37
vendor/github.com/golang/snappy/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
# This is the official list of people who can contribute
|
||||
# (and typically have contributed) code to the Snappy-Go repository.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# The submission process automatically checks to make sure
|
||||
# that people submitting code are listed in this file (by email address).
|
||||
#
|
||||
# Names should be added to this file only after verifying that
|
||||
# the individual or the individual's organization has agreed to
|
||||
# the appropriate Contributor License Agreement, found here:
|
||||
#
|
||||
# http://code.google.com/legal/individual-cla-v1.0.html
|
||||
# http://code.google.com/legal/corporate-cla-v1.0.html
|
||||
#
|
||||
# The agreement for individuals can be filled out on the web.
|
||||
#
|
||||
# When adding J Random Contributor's name to this file,
|
||||
# either J's name or J's organization's name should be
|
||||
# added to the AUTHORS file, depending on whether the
|
||||
# individual or corporate CLA was used.
|
||||
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Damian Gryski <dgryski@gmail.com>
|
||||
Jan Mercl <0xjnml@gmail.com>
|
||||
Kai Backman <kaib@golang.org>
|
||||
Marc-Antoine Ruel <maruel@chromium.org>
|
||||
Nigel Tao <nigeltao@golang.org>
|
||||
Rob Pike <r@golang.org>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Russ Cox <rsc@golang.org>
|
||||
Sebastien Binet <seb.binet@gmail.com>
|
||||
27
vendor/github.com/golang/snappy/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
107
vendor/github.com/golang/snappy/README
generated
vendored
Normal file
@ -0,0 +1,107 @@
|
||||
The Snappy compression format in the Go programming language.
|
||||
|
||||
To download and install from source:
|
||||
$ go get github.com/golang/snappy
|
||||
|
||||
Unless otherwise noted, the Snappy-Go source files are distributed
|
||||
under the BSD-style license found in the LICENSE file.
|
||||
|
||||
|
||||
|
||||
Benchmarks.
|
||||
|
||||
The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
|
||||
or so files, the same set used by the C++ Snappy code (github.com/google/snappy
|
||||
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
|
||||
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
|
||||
|
||||
"go test -test.bench=."
|
||||
|
||||
_UFlat0-8 2.19GB/s ± 0% html
|
||||
_UFlat1-8 1.41GB/s ± 0% urls
|
||||
_UFlat2-8 23.5GB/s ± 2% jpg
|
||||
_UFlat3-8 1.91GB/s ± 0% jpg_200
|
||||
_UFlat4-8 14.0GB/s ± 1% pdf
|
||||
_UFlat5-8 1.97GB/s ± 0% html4
|
||||
_UFlat6-8 814MB/s ± 0% txt1
|
||||
_UFlat7-8 785MB/s ± 0% txt2
|
||||
_UFlat8-8 857MB/s ± 0% txt3
|
||||
_UFlat9-8 719MB/s ± 1% txt4
|
||||
_UFlat10-8 2.84GB/s ± 0% pb
|
||||
_UFlat11-8 1.05GB/s ± 0% gaviota
|
||||
|
||||
_ZFlat0-8 1.04GB/s ± 0% html
|
||||
_ZFlat1-8 534MB/s ± 0% urls
|
||||
_ZFlat2-8 15.7GB/s ± 1% jpg
|
||||
_ZFlat3-8 740MB/s ± 3% jpg_200
|
||||
_ZFlat4-8 9.20GB/s ± 1% pdf
|
||||
_ZFlat5-8 991MB/s ± 0% html4
|
||||
_ZFlat6-8 379MB/s ± 0% txt1
|
||||
_ZFlat7-8 352MB/s ± 0% txt2
|
||||
_ZFlat8-8 396MB/s ± 1% txt3
|
||||
_ZFlat9-8 327MB/s ± 1% txt4
|
||||
_ZFlat10-8 1.33GB/s ± 1% pb
|
||||
_ZFlat11-8 605MB/s ± 1% gaviota
|
||||
|
||||
|
||||
|
||||
"go test -test.bench=. -tags=noasm"
|
||||
|
||||
_UFlat0-8 621MB/s ± 2% html
|
||||
_UFlat1-8 494MB/s ± 1% urls
|
||||
_UFlat2-8 23.2GB/s ± 1% jpg
|
||||
_UFlat3-8 1.12GB/s ± 1% jpg_200
|
||||
_UFlat4-8 4.35GB/s ± 1% pdf
|
||||
_UFlat5-8 609MB/s ± 0% html4
|
||||
_UFlat6-8 296MB/s ± 0% txt1
|
||||
_UFlat7-8 288MB/s ± 0% txt2
|
||||
_UFlat8-8 309MB/s ± 1% txt3
|
||||
_UFlat9-8 280MB/s ± 1% txt4
|
||||
_UFlat10-8 753MB/s ± 0% pb
|
||||
_UFlat11-8 400MB/s ± 0% gaviota
|
||||
|
||||
_ZFlat0-8 409MB/s ± 1% html
|
||||
_ZFlat1-8 250MB/s ± 1% urls
|
||||
_ZFlat2-8 12.3GB/s ± 1% jpg
|
||||
_ZFlat3-8 132MB/s ± 0% jpg_200
|
||||
_ZFlat4-8 2.92GB/s ± 0% pdf
|
||||
_ZFlat5-8 405MB/s ± 1% html4
|
||||
_ZFlat6-8 179MB/s ± 1% txt1
|
||||
_ZFlat7-8 170MB/s ± 1% txt2
|
||||
_ZFlat8-8 189MB/s ± 1% txt3
|
||||
_ZFlat9-8 164MB/s ± 1% txt4
|
||||
_ZFlat10-8 479MB/s ± 1% pb
|
||||
_ZFlat11-8 270MB/s ± 1% gaviota
|
||||
|
||||
|
||||
|
||||
For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
|
||||
are the numbers from C++ Snappy's
|
||||
|
||||
make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
|
||||
|
||||
BM_UFlat/0 2.4GB/s html
|
||||
BM_UFlat/1 1.4GB/s urls
|
||||
BM_UFlat/2 21.8GB/s jpg
|
||||
BM_UFlat/3 1.5GB/s jpg_200
|
||||
BM_UFlat/4 13.3GB/s pdf
|
||||
BM_UFlat/5 2.1GB/s html4
|
||||
BM_UFlat/6 1.0GB/s txt1
|
||||
BM_UFlat/7 959.4MB/s txt2
|
||||
BM_UFlat/8 1.0GB/s txt3
|
||||
BM_UFlat/9 864.5MB/s txt4
|
||||
BM_UFlat/10 2.9GB/s pb
|
||||
BM_UFlat/11 1.2GB/s gaviota
|
||||
|
||||
BM_ZFlat/0 944.3MB/s html (22.31 %)
|
||||
BM_ZFlat/1 501.6MB/s urls (47.78 %)
|
||||
BM_ZFlat/2 14.3GB/s jpg (99.95 %)
|
||||
BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
|
||||
BM_ZFlat/4 8.3GB/s pdf (83.30 %)
|
||||
BM_ZFlat/5 903.5MB/s html4 (22.52 %)
|
||||
BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
|
||||
BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
|
||||
BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
|
||||
BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
|
||||
BM_ZFlat/10 1.2GB/s pb (19.68 %)
|
||||
BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
|
||||
237
vendor/github.com/golang/snappy/decode.go
generated
vendored
Normal file
@ -0,0 +1,237 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt reports that the input is invalid.
|
||||
ErrCorrupt = errors.New("snappy: corrupt input")
|
||||
// ErrTooLarge reports that the uncompressed length is too large.
|
||||
ErrTooLarge = errors.New("snappy: decoded block is too large")
|
||||
// ErrUnsupported reports that the input isn't supported.
|
||||
ErrUnsupported = errors.New("snappy: unsupported input")
|
||||
|
||||
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
|
||||
)
|
||||
|
||||
// DecodedLen returns the length of the decoded block.
|
||||
func DecodedLen(src []byte) (int, error) {
|
||||
v, _, err := decodedLen(src)
|
||||
return v, err
|
||||
}
|
||||
|
||||
// decodedLen returns the length of the decoded block and the number of bytes
|
||||
// that the length header occupied.
|
||||
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
|
||||
v, n := binary.Uvarint(src)
|
||||
if n <= 0 || v > 0xffffffff {
|
||||
return 0, 0, ErrCorrupt
|
||||
}
|
||||
|
||||
const wordSize = 32 << (^uint(0) >> 32 & 1)
|
||||
if wordSize == 32 && v > 0x7fffffff {
|
||||
return 0, 0, ErrTooLarge
|
||||
}
|
||||
return int(v), n, nil
|
||||
}
|
||||
|
||||
const (
|
||||
decodeErrCodeCorrupt = 1
|
||||
decodeErrCodeUnsupportedLiteralLength = 2
|
||||
)
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire decoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
//
|
||||
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
dLen, s, err := decodedLen(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dLen <= len(dst) {
|
||||
dst = dst[:dLen]
|
||||
} else {
|
||||
dst = make([]byte, dLen)
|
||||
}
|
||||
switch decode(dst, src[s:]) {
|
||||
case 0:
|
||||
return dst, nil
|
||||
case decodeErrCodeUnsupportedLiteralLength:
|
||||
return nil, errUnsupportedLiteralLength
|
||||
}
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
// NewReader returns a new Reader that decompresses from r, using the framing
|
||||
// format described at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return &Reader{
|
||||
r: r,
|
||||
decoded: make([]byte, maxBlockSize),
|
||||
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Reader is an io.Reader that can read Snappy-compressed bytes.
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
err error
|
||||
decoded []byte
|
||||
buf []byte
|
||||
// decoded[i:j] contains decoded bytes that have not yet been passed on.
|
||||
i, j int
|
||||
readHeader bool
|
||||
}
|
||||
|
||||
// Reset discards any buffered data, resets all state, and switches the Snappy
|
||||
// reader to read from r. This permits reusing a Reader rather than allocating
|
||||
// a new one.
|
||||
func (r *Reader) Reset(reader io.Reader) {
|
||||
r.r = reader
|
||||
r.err = nil
|
||||
r.i = 0
|
||||
r.j = 0
|
||||
r.readHeader = false
|
||||
}
|
||||
|
||||
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
|
||||
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
|
||||
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
|
||||
r.err = ErrCorrupt
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Read satisfies the io.Reader interface.
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
for {
|
||||
if r.i < r.j {
|
||||
n := copy(p, r.decoded[r.i:r.j])
|
||||
r.i += n
|
||||
return n, nil
|
||||
}
|
||||
if !r.readFull(r.buf[:4], true) {
|
||||
return 0, r.err
|
||||
}
|
||||
chunkType := r.buf[0]
|
||||
if !r.readHeader {
|
||||
if chunkType != chunkTypeStreamIdentifier {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.readHeader = true
|
||||
}
|
||||
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
|
||||
if chunkLen > len(r.buf) {
|
||||
r.err = ErrUnsupported
|
||||
return 0, r.err
|
||||
}
|
||||
|
||||
// The chunk types are specified at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
switch chunkType {
|
||||
case chunkTypeCompressedData:
|
||||
// Section 4.2. Compressed data (chunk type 0x00).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
buf := r.buf[:chunkLen]
|
||||
if !r.readFull(buf, false) {
|
||||
return 0, r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
buf = buf[checksumSize:]
|
||||
|
||||
n, err := DecodedLen(buf)
|
||||
if err != nil {
|
||||
r.err = err
|
||||
return 0, r.err
|
||||
}
|
||||
if n > len(r.decoded) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if _, err := Decode(r.decoded, buf); err != nil {
|
||||
r.err = err
|
||||
return 0, r.err
|
||||
}
|
||||
if crc(r.decoded[:n]) != checksum {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.i, r.j = 0, n
|
||||
continue
|
||||
|
||||
case chunkTypeUncompressedData:
|
||||
// Section 4.3. Uncompressed data (chunk type 0x01).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
buf := r.buf[:checksumSize]
|
||||
if !r.readFull(buf, false) {
|
||||
return 0, r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
// Read directly into r.decoded instead of via r.buf.
|
||||
n := chunkLen - checksumSize
|
||||
if n > len(r.decoded) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if !r.readFull(r.decoded[:n], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
if crc(r.decoded[:n]) != checksum {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.i, r.j = 0, n
|
||||
continue
|
||||
|
||||
case chunkTypeStreamIdentifier:
|
||||
// Section 4.1. Stream identifier (chunk type 0xff).
|
||||
if chunkLen != len(magicBody) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if !r.readFull(r.buf[:len(magicBody)], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
for i := 0; i < len(magicBody); i++ {
|
||||
if r.buf[i] != magicBody[i] {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if chunkType <= 0x7f {
|
||||
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
|
||||
r.err = ErrUnsupported
|
||||
return 0, r.err
|
||||
}
|
||||
// Section 4.4 Padding (chunk type 0xfe).
|
||||
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
|
||||
if !r.readFull(r.buf[:chunkLen], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
}
|
||||
}
|
||||
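Decode, DecodedLen and the framed Reader above are the package's decode entry points. A minimal sketch of the block-level pair; the compressed block is produced with Encode from encode.go, which appears later in this diff, and the main wrapper is only illustrative.

package main

import (
    "fmt"

    "github.com/golang/snappy"
)

func main() {
    block := snappy.Encode(nil, []byte("hello hello hello hello"))

    // DecodedLen only reads the uvarint length header at the front of the block.
    n, err := snappy.DecodedLen(block)
    if err != nil {
        panic(err)
    }
    fmt.Println("decoded length:", n) // 23, the length of the original input

    // Passing a dst of at least that length lets Decode reuse it.
    out, err := snappy.Decode(make([]byte, n), block)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%q\n", out)
}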
14
vendor/github.com/golang/snappy/decode_amd64.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

package snappy

// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int
490
vendor/github.com/golang/snappy/decode_amd64.s
generated
vendored
Normal file
@ -0,0 +1,490 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// The asm code generally follows the pure Go code in decode_other.go, except
|
||||
// where marked with a "!!!".
|
||||
|
||||
// func decode(dst, src []byte) int
|
||||
//
|
||||
// All local variables fit into registers. The non-zero stack size is only to
|
||||
// spill registers and push args when issuing a CALL. The register allocation:
|
||||
// - AX scratch
|
||||
// - BX scratch
|
||||
// - CX length or x
|
||||
// - DX offset
|
||||
// - SI &src[s]
|
||||
// - DI &dst[d]
|
||||
// + R8 dst_base
|
||||
// + R9 dst_len
|
||||
// + R10 dst_base + dst_len
|
||||
// + R11 src_base
|
||||
// + R12 src_len
|
||||
// + R13 src_base + src_len
|
||||
// - R14 used by doCopy
|
||||
// - R15 used by doCopy
|
||||
//
|
||||
// The registers R8-R13 (marked with a "+") are set at the start of the
|
||||
// function, and after a CALL returns, and are not otherwise modified.
|
||||
//
|
||||
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
|
||||
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
|
||||
TEXT ·decode(SB), NOSPLIT, $48-56
|
||||
// Initialize SI, DI and R8-R13.
|
||||
MOVQ dst_base+0(FP), R8
|
||||
MOVQ dst_len+8(FP), R9
|
||||
MOVQ R8, DI
|
||||
MOVQ R8, R10
|
||||
ADDQ R9, R10
|
||||
MOVQ src_base+24(FP), R11
|
||||
MOVQ src_len+32(FP), R12
|
||||
MOVQ R11, SI
|
||||
MOVQ R11, R13
|
||||
ADDQ R12, R13
|
||||
|
||||
loop:
|
||||
// for s < len(src)
|
||||
CMPQ SI, R13
|
||||
JEQ end
|
||||
|
||||
// CX = uint32(src[s])
|
||||
//
|
||||
// switch src[s] & 0x03
|
||||
MOVBLZX (SI), CX
|
||||
MOVL CX, BX
|
||||
ANDL $3, BX
|
||||
CMPL BX, $1
|
||||
JAE tagCopy
|
||||
|
||||
// ----------------------------------------
|
||||
// The code below handles literal tags.
|
||||
|
||||
// case tagLiteral:
|
||||
// x := uint32(src[s] >> 2)
|
||||
// switch
|
||||
SHRL $2, CX
|
||||
CMPL CX, $60
|
||||
JAE tagLit60Plus
|
||||
|
||||
// case x < 60:
|
||||
// s++
|
||||
INCQ SI
|
||||
|
||||
doLit:
|
||||
// This is the end of the inner "switch", when we have a literal tag.
|
||||
//
|
||||
// We assume that CX == x and x fits in a uint32, where x is the variable
|
||||
// used in the pure Go decode_other.go code.
|
||||
|
||||
// length = int(x) + 1
|
||||
//
|
||||
// Unlike the pure Go code, we don't need to check if length <= 0 because
|
||||
// CX can hold 64 bits, so the increment cannot overflow.
|
||||
INCQ CX
|
||||
|
||||
// Prepare to check if copying length bytes will run past the end of dst or
|
||||
// src.
|
||||
//
|
||||
// AX = len(dst) - d
|
||||
// BX = len(src) - s
|
||||
MOVQ R10, AX
|
||||
SUBQ DI, AX
|
||||
MOVQ R13, BX
|
||||
SUBQ SI, BX
|
||||
|
||||
// !!! Try a faster technique for short (16 or fewer bytes) copies.
|
||||
//
|
||||
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
|
||||
// goto callMemmove // Fall back on calling runtime·memmove.
|
||||
// }
|
||||
//
|
||||
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
|
||||
// against 21 instead of 16, because it cannot assume that all of its input
|
||||
// is contiguous in memory and so it needs to leave enough source bytes to
|
||||
// read the next tag without refilling buffers, but Go's Decode assumes
|
||||
// contiguousness (the src argument is a []byte).
|
||||
CMPQ CX, $16
|
||||
JGT callMemmove
|
||||
CMPQ AX, $16
|
||||
JLT callMemmove
|
||||
CMPQ BX, $16
|
||||
JLT callMemmove
|
||||
|
||||
// !!! Implement the copy from src to dst as a 16-byte load and store.
|
||||
// (Decode's documentation says that dst and src must not overlap.)
|
||||
//
|
||||
// This always copies 16 bytes, instead of only length bytes, but that's
|
||||
// OK. If the input is a valid Snappy encoding then subsequent iterations
|
||||
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
|
||||
// non-nil error), so the overrun will be ignored.
|
||||
//
|
||||
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
|
||||
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||
// effective on architectures that are fussier about alignment.
|
||||
MOVOU 0(SI), X0
|
||||
MOVOU X0, 0(DI)
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADDQ CX, DI
|
||||
ADDQ CX, SI
|
||||
JMP loop
|
||||
|
||||
callMemmove:
|
||||
// if length > len(dst)-d || length > len(src)-s { etc }
|
||||
CMPQ CX, AX
|
||||
JGT errCorrupt
|
||||
CMPQ CX, BX
|
||||
JGT errCorrupt
|
||||
|
||||
// copy(dst[d:], src[s:s+length])
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
|
||||
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
|
||||
// three registers to the stack, to save local variables across the CALL.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ SI, 8(SP)
|
||||
MOVQ CX, 16(SP)
|
||||
MOVQ DI, 24(SP)
|
||||
MOVQ SI, 32(SP)
|
||||
MOVQ CX, 40(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
|
||||
// Restore local variables: unspill registers from the stack and
|
||||
// re-calculate R8-R13.
|
||||
MOVQ 24(SP), DI
|
||||
MOVQ 32(SP), SI
|
||||
MOVQ 40(SP), CX
|
||||
MOVQ dst_base+0(FP), R8
|
||||
MOVQ dst_len+8(FP), R9
|
||||
MOVQ R8, R10
|
||||
ADDQ R9, R10
|
||||
MOVQ src_base+24(FP), R11
|
||||
MOVQ src_len+32(FP), R12
|
||||
MOVQ R11, R13
|
||||
ADDQ R12, R13
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADDQ CX, DI
|
||||
ADDQ CX, SI
|
||||
JMP loop
|
||||
|
||||
tagLit60Plus:
|
||||
// !!! This fragment does the
|
||||
//
|
||||
// s += x - 58; if uint(s) > uint(len(src)) { etc }
|
||||
//
|
||||
// checks. In the asm version, we code it once instead of once per switch case.
|
||||
ADDQ CX, SI
|
||||
SUBQ $58, SI
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// case x == 60:
|
||||
CMPL CX, $61
|
||||
JEQ tagLit61
|
||||
JA tagLit62Plus
|
||||
|
||||
// x = uint32(src[s-1])
|
||||
MOVBLZX -1(SI), CX
|
||||
JMP doLit
|
||||
|
||||
tagLit61:
|
||||
// case x == 61:
|
||||
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||
MOVWLZX -2(SI), CX
|
||||
JMP doLit
|
||||
|
||||
tagLit62Plus:
|
||||
CMPL CX, $62
|
||||
JA tagLit63
|
||||
|
||||
// case x == 62:
|
||||
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||
MOVWLZX -3(SI), CX
|
||||
MOVBLZX -1(SI), BX
|
||||
SHLL $16, BX
|
||||
ORL BX, CX
|
||||
JMP doLit
|
||||
|
||||
tagLit63:
|
||||
// case x == 63:
|
||||
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||
MOVL -4(SI), CX
|
||||
JMP doLit
|
||||
|
||||
// The code above handles literal tags.
|
||||
// ----------------------------------------
|
||||
// The code below handles copy tags.
|
||||
|
||||
tagCopy4:
|
||||
// case tagCopy4:
|
||||
// s += 5
|
||||
ADDQ $5, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// length = 1 + int(src[s-5])>>2
|
||||
SHRQ $2, CX
|
||||
INCQ CX
|
||||
|
||||
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||
MOVLQZX -4(SI), DX
|
||||
JMP doCopy
|
||||
|
||||
tagCopy2:
|
||||
// case tagCopy2:
|
||||
// s += 3
|
||||
ADDQ $3, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// length = 1 + int(src[s-3])>>2
|
||||
SHRQ $2, CX
|
||||
INCQ CX
|
||||
|
||||
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||
MOVWQZX -2(SI), DX
|
||||
JMP doCopy
|
||||
|
||||
tagCopy:
|
||||
// We have a copy tag. We assume that:
|
||||
// - BX == src[s] & 0x03
|
||||
// - CX == src[s]
|
||||
CMPQ BX, $2
|
||||
JEQ tagCopy2
|
||||
JA tagCopy4
|
||||
|
||||
// case tagCopy1:
|
||||
// s += 2
|
||||
ADDQ $2, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||
MOVQ CX, DX
|
||||
ANDQ $0xe0, DX
|
||||
SHLQ $3, DX
|
||||
MOVBQZX -1(SI), BX
|
||||
ORQ BX, DX
|
||||
|
||||
// length = 4 + int(src[s-2])>>2&0x7
|
||||
SHRQ $2, CX
|
||||
ANDQ $7, CX
|
||||
ADDQ $4, CX
|
||||
|
||||
doCopy:
|
||||
// This is the end of the outer "switch", when we have a copy tag.
|
||||
//
|
||||
// We assume that:
|
||||
// - CX == length && CX > 0
|
||||
// - DX == offset
|
||||
|
||||
// if offset <= 0 { etc }
|
||||
CMPQ DX, $0
|
||||
JLE errCorrupt
|
||||
|
||||
// if d < offset { etc }
|
||||
MOVQ DI, BX
|
||||
SUBQ R8, BX
|
||||
CMPQ BX, DX
|
||||
JLT errCorrupt
|
||||
|
||||
// if length > len(dst)-d { etc }
|
||||
MOVQ R10, BX
|
||||
SUBQ DI, BX
|
||||
CMPQ CX, BX
|
||||
JGT errCorrupt
|
||||
|
||||
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
|
||||
//
|
||||
// Set:
|
||||
// - R14 = len(dst)-d
|
||||
// - R15 = &dst[d-offset]
|
||||
MOVQ R10, R14
|
||||
SUBQ DI, R14
|
||||
MOVQ DI, R15
|
||||
SUBQ DX, R15
|
||||
|
||||
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
|
||||
//
|
||||
// First, try using two 8-byte load/stores, similar to the doLit technique
|
||||
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
|
||||
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
|
||||
// and not one 16-byte load/store, and the first store has to be before the
|
||||
// second load, due to the overlap if offset is in the range [8, 16).
|
||||
//
|
||||
// if length > 16 || offset < 8 || len(dst)-d < 16 {
|
||||
// goto slowForwardCopy
|
||||
// }
|
||||
// copy 16 bytes
|
||||
// d += length
|
||||
CMPQ CX, $16
|
||||
JGT slowForwardCopy
|
||||
CMPQ DX, $8
|
||||
JLT slowForwardCopy
|
||||
CMPQ R14, $16
|
||||
JLT slowForwardCopy
|
||||
MOVQ 0(R15), AX
|
||||
MOVQ AX, 0(DI)
|
||||
MOVQ 8(R15), BX
|
||||
MOVQ BX, 8(DI)
|
||||
ADDQ CX, DI
|
||||
JMP loop
|
||||
|
||||
slowForwardCopy:
|
||||
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
|
||||
// can still try 8-byte load stores, provided we can overrun up to 10 extra
|
||||
// bytes. As above, the overrun will be fixed up by subsequent iterations
|
||||
// of the outermost loop.
|
||||
//
|
||||
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
|
||||
// commentary says:
|
||||
//
|
||||
// ----
|
||||
//
|
||||
// The main part of this loop is a simple copy of eight bytes at a time
|
||||
// until we've copied (at least) the requested amount of bytes. However,
|
||||
// if d and d-offset are less than eight bytes apart (indicating a
|
||||
// repeating pattern of length < 8), we first need to expand the pattern in
|
||||
// order to get the correct results. For instance, if the buffer looks like
|
||||
// this, with the eight-byte <d-offset> and <d> patterns marked as
|
||||
// intervals:
|
||||
//
|
||||
// abxxxxxxxxxxxx
|
||||
// [------] d-offset
|
||||
// [------] d
|
||||
//
|
||||
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
|
||||
// once, after which we can move <d> two bytes without moving <d-offset>:
|
||||
//
|
||||
// ababxxxxxxxxxx
|
||||
// [------] d-offset
|
||||
// [------] d
|
||||
//
|
||||
// and repeat the exercise until the two no longer overlap.
|
||||
//
|
||||
// This allows us to do very well in the special case of one single byte
|
||||
// repeated many times, without taking a big hit for more general cases.
|
||||
//
|
||||
// The worst case of extra writing past the end of the match occurs when
|
||||
// offset == 1 and length == 1; the last copy will read from byte positions
|
||||
// [0..7] and write to [4..11], whereas it was only supposed to write to
|
||||
// position 1. Thus, ten excess bytes.
|
||||
//
|
||||
// ----
|
||||
//
|
||||
// That "10 byte overrun" worst case is confirmed by Go's
|
||||
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
|
||||
// and finishSlowForwardCopy algorithm.
|
||||
//
|
||||
// if length > len(dst)-d-10 {
|
||||
// goto verySlowForwardCopy
|
||||
// }
|
||||
SUBQ $10, R14
|
||||
CMPQ CX, R14
|
||||
JGT verySlowForwardCopy
|
||||
|
||||
makeOffsetAtLeast8:
|
||||
// !!! As above, expand the pattern so that offset >= 8 and we can use
|
||||
// 8-byte load/stores.
|
||||
//
|
||||
// for offset < 8 {
|
||||
// copy 8 bytes from dst[d-offset:] to dst[d:]
|
||||
// length -= offset
|
||||
// d += offset
|
||||
// offset += offset
|
||||
// // The two previous lines together means that d-offset, and therefore
|
||||
// // R15, is unchanged.
|
||||
// }
|
||||
CMPQ DX, $8
|
||||
JGE fixUpSlowForwardCopy
|
||||
MOVQ (R15), BX
|
||||
MOVQ BX, (DI)
|
||||
SUBQ DX, CX
|
||||
ADDQ DX, DI
|
||||
ADDQ DX, DX
|
||||
JMP makeOffsetAtLeast8
|
||||
|
||||
fixUpSlowForwardCopy:
|
||||
// !!! Add length (which might be negative now) to d (implied by DI being
|
||||
// &dst[d]) so that d ends up at the right place when we jump back to the
|
||||
// top of the loop. Before we do that, though, we save DI to AX so that, if
|
||||
// length is positive, copying the remaining length bytes will write to the
|
||||
// right place.
|
||||
MOVQ DI, AX
|
||||
ADDQ CX, DI
|
||||
|
||||
finishSlowForwardCopy:
|
||||
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
|
||||
// length means that we overrun, but as above, that will be fixed up by
|
||||
// subsequent iterations of the outermost loop.
|
||||
CMPQ CX, $0
|
||||
JLE loop
|
||||
MOVQ (R15), BX
|
||||
MOVQ BX, (AX)
|
||||
ADDQ $8, R15
|
||||
ADDQ $8, AX
|
||||
SUBQ $8, CX
|
||||
JMP finishSlowForwardCopy
|
||||
|
||||
verySlowForwardCopy:
|
||||
// verySlowForwardCopy is a simple implementation of forward copy. In C
|
||||
// parlance, this is a do/while loop instead of a while loop, since we know
|
||||
// that length > 0. In Go syntax:
|
||||
//
|
||||
// for {
|
||||
// dst[d] = dst[d - offset]
|
||||
// d++
|
||||
// length--
|
||||
// if length == 0 {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
MOVB (R15), BX
|
||||
MOVB BX, (DI)
|
||||
INCQ R15
|
||||
INCQ DI
|
||||
DECQ CX
|
||||
JNZ verySlowForwardCopy
|
||||
JMP loop
|
||||
|
||||
// The code above handles copy tags.
|
||||
// ----------------------------------------
|
||||
|
||||
end:
|
||||
// This is the end of the "for s < len(src)".
|
||||
//
|
||||
// if d != len(dst) { etc }
|
||||
CMPQ DI, R10
|
||||
JNE errCorrupt
|
||||
|
||||
// return 0
|
||||
MOVQ $0, ret+48(FP)
|
||||
RET
|
||||
|
||||
errCorrupt:
|
||||
// return decodeErrCodeCorrupt
|
||||
MOVQ $1, ret+48(FP)
|
||||
RET
|
||||
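The slowForwardCopy commentary above (snappy's IncrementalCopyFastPath idea) is easier to see in plain Go: each 8-byte block copy extends the repeating pattern, so the offset can double until 8-byte load/stores no longer overlap. A rough sketch only, not the code path the package actually runs; the temporary buffer stands in for the asm's load-then-store.

package main

import "fmt"

func main() {
    // The example used in the comments above: offset 2, pattern "ab".
    buf := []byte("abxxxxxxxxxxxx")
    d, offset, length := 2, 2, 12

    for offset < 8 {
        var tmp [8]byte
        copy(tmp[:], buf[d-offset:]) // 8-byte "load" ...
        copy(buf[d:], tmp[:])        // ... then 8-byte "store", as in the asm
        length -= offset
        d += offset
        offset += offset
        fmt.Printf("%s  offset now %d\n", buf, offset)
    }
    // Prints "ababxxxxxxxxxx  offset now 4", then "ababababxxxxxx  offset now 8";
    // the remaining `length` bytes can then be copied 8 at a time safely.
}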
101
vendor/github.com/golang/snappy/decode_other.go
generated
vendored
Normal file
@ -0,0 +1,101 @@
|
||||
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !amd64 appengine !gc noasm
|
||||
|
||||
package snappy
|
||||
|
||||
// decode writes the decoding of src to dst. It assumes that the varint-encoded
|
||||
// length of the decompressed bytes has already been read, and that len(dst)
|
||||
// equals that length.
|
||||
//
|
||||
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
|
||||
func decode(dst, src []byte) int {
|
||||
var d, s, offset, length int
|
||||
for s < len(src) {
|
||||
switch src[s] & 0x03 {
|
||||
case tagLiteral:
|
||||
x := uint32(src[s] >> 2)
|
||||
switch {
|
||||
case x < 60:
|
||||
s++
|
||||
case x == 60:
|
||||
s += 2
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-1])
|
||||
case x == 61:
|
||||
s += 3
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||
case x == 62:
|
||||
s += 4
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||
case x == 63:
|
||||
s += 5
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||
}
|
||||
length = int(x) + 1
|
||||
if length <= 0 {
|
||||
return decodeErrCodeUnsupportedLiteralLength
|
||||
}
|
||||
if length > len(dst)-d || length > len(src)-s {
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
copy(dst[d:], src[s:s+length])
|
||||
d += length
|
||||
s += length
|
||||
continue
|
||||
|
||||
case tagCopy1:
|
||||
s += 2
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = 4 + int(src[s-2])>>2&0x7
|
||||
offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||
|
||||
case tagCopy2:
|
||||
s += 3
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = 1 + int(src[s-3])>>2
|
||||
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||
|
||||
case tagCopy4:
|
||||
s += 5
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = 1 + int(src[s-5])>>2
|
||||
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||
}
|
||||
|
||||
if offset <= 0 || d < offset || length > len(dst)-d {
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
|
||||
// the built-in copy function, this byte-by-byte copy always runs
|
||||
// forwards, even if the slices overlap. Conceptually, this is:
|
||||
//
|
||||
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
|
||||
for end := d + length; d != end; d++ {
|
||||
dst[d] = dst[d-offset]
|
||||
}
|
||||
}
|
||||
if d != len(dst) {
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
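The tag handling in decode above can be exercised with a hand-assembled block: a two-byte literal followed by a tagCopy1 whose forward byte-by-byte copy turns the overlap into a repeated pattern. The byte values below are derived from the tag layout in this file; this is a sketch decoded through the exported Decode wrapper, not something the package ships.

package main

import (
    "fmt"

    "github.com/golang/snappy"
)

func main() {
    block := []byte{
        0x09,           // uvarint header: decoded length 9
        0x04, 'a', 'b', // tagLiteral, length 2, followed by the bytes "ab"
        0x0d, 0x02,     // tagCopy1: length 4+3 = 7, offset 2
    }
    out, err := snappy.Decode(nil, block)
    if err != nil {
        panic(err)
    }
    // The copy reads bytes it has just written, yielding "ababababa".
    fmt.Printf("%q\n", out)
}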
285
vendor/github.com/golang/snappy/encode.go
generated
vendored
Normal file
@ -0,0 +1,285 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Encode returns the encoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
//
|
||||
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||
func Encode(dst, src []byte) []byte {
|
||||
if n := MaxEncodedLen(len(src)); n < 0 {
|
||||
panic(ErrTooLarge)
|
||||
} else if len(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
}
|
||||
|
||||
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||
d := binary.PutUvarint(dst, uint64(len(src)))
|
||||
|
||||
for len(src) > 0 {
|
||||
p := src
|
||||
src = nil
|
||||
if len(p) > maxBlockSize {
|
||||
p, src = p[:maxBlockSize], p[maxBlockSize:]
|
||||
}
|
||||
if len(p) < minNonLiteralBlockSize {
|
||||
d += emitLiteral(dst[d:], p)
|
||||
} else {
|
||||
d += encodeBlock(dst[d:], p)
|
||||
}
|
||||
}
|
||||
return dst[:d]
|
||||
}
|
||||
|
||||
// inputMargin is the minimum number of extra input bytes to keep, inside
|
||||
// encodeBlock's inner loop. On some architectures, this margin lets us
|
||||
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
|
||||
// literals can be implemented as a single load to and store from a 16-byte
|
||||
// register. That literal's actual length can be as short as 1 byte, so this
|
||||
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
|
||||
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
|
||||
// that we don't overrun the dst and src buffers.
|
||||
const inputMargin = 16 - 1
|
||||
|
||||
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
|
||||
// could be encoded with a copy tag. This is the minimum with respect to the
|
||||
// algorithm used by encodeBlock, not a minimum enforced by the file format.
|
||||
//
|
||||
// The encoded output must start with at least a 1 byte literal, as there are
|
||||
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
|
||||
// from an emitCopy call in encodeBlock's main loop, would require at least
|
||||
// another inputMargin bytes, for the reason above: we want any emitLiteral
|
||||
// calls inside encodeBlock's main loop to use the fast path if possible, which
|
||||
// requires being able to overrun by inputMargin bytes. Thus,
|
||||
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
|
||||
//
|
||||
// The C++ code doesn't use this exact threshold, but it could, as discussed at
|
||||
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
|
||||
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
|
||||
// optimization. It should not affect the encoded form. This is tested by
|
||||
// TestSameEncodingAsCppShortCopies.
|
||||
const minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
|
||||
// MaxEncodedLen returns the maximum length of a snappy block, given its
|
||||
// uncompressed length.
|
||||
//
|
||||
// It will return a negative value if srcLen is too large to encode.
|
||||
func MaxEncodedLen(srcLen int) int {
|
||||
n := uint64(srcLen)
|
||||
if n > 0xffffffff {
|
||||
return -1
|
||||
}
|
||||
// Compressed data can be defined as:
|
||||
// compressed := item* literal*
|
||||
// item := literal* copy
|
||||
//
|
||||
// The trailing literal sequence has a space blowup of at most 62/60
|
||||
// since a literal of length 60 needs one tag byte + one extra byte
|
||||
// for length information.
|
||||
//
|
||||
// Item blowup is trickier to measure. Suppose the "copy" op copies
|
||||
// 4 bytes of data. Because of a special check in the encoding code,
|
||||
// we produce a 4-byte copy only if the offset is < 65536. Therefore
|
||||
// the copy op takes 3 bytes to encode, and this type of item leads
|
||||
// to at most the 62/60 blowup for representing literals.
|
||||
//
|
||||
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
|
||||
// enough, it will take 5 bytes to encode the copy op. Therefore the
|
||||
// worst case here is a one-byte literal followed by a five-byte copy.
|
||||
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
|
||||
//
|
||||
// This last factor dominates the blowup, so the final estimate is:
|
||||
n = 32 + n + n/6
|
||||
if n > 0xffffffff {
|
||||
return -1
|
||||
}
|
||||
return int(n)
|
||||
}
|
||||
|
||||
var errClosed = errors.New("snappy: Writer is closed")
|
||||
|
||||
// NewWriter returns a new Writer that compresses to w.
|
||||
//
|
||||
// The Writer returned does not buffer writes. There is no need to Flush or
|
||||
// Close such a Writer.
|
||||
//
|
||||
// Deprecated: the Writer returned is not suitable for many small writes, only
|
||||
// for few large writes. Use NewBufferedWriter instead, which is efficient
|
||||
// regardless of the frequency and shape of the writes, and remember to Close
|
||||
// that Writer when done.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
return &Writer{
|
||||
w: w,
|
||||
obuf: make([]byte, obufLen),
|
||||
}
|
||||
}
|
||||
|
||||
// NewBufferedWriter returns a new Writer that compresses to w, using the
|
||||
// framing format described at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
//
|
||||
// The Writer returned buffers writes. Users must call Close to guarantee all
|
||||
// data has been forwarded to the underlying io.Writer. They may also call
|
||||
// Flush zero or more times before calling Close.
|
||||
func NewBufferedWriter(w io.Writer) *Writer {
|
||||
return &Writer{
|
||||
w: w,
|
||||
ibuf: make([]byte, 0, maxBlockSize),
|
||||
obuf: make([]byte, obufLen),
|
||||
}
|
||||
}
|
||||
|
||||
// Writer is an io.Writer that can write Snappy-compressed bytes.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
err error
|
||||
|
||||
// ibuf is a buffer for the incoming (uncompressed) bytes.
|
||||
//
|
||||
// Its use is optional. For backwards compatibility, Writers created by the
|
||||
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
|
||||
// therefore do not need to be Flush'ed or Close'd.
|
||||
ibuf []byte
|
||||
|
||||
// obuf is a buffer for the outgoing (compressed) bytes.
|
||||
obuf []byte
|
||||
|
||||
// wroteStreamHeader is whether we have written the stream header.
|
||||
wroteStreamHeader bool
|
||||
}
|
||||
|
||||
// Reset discards the writer's state and switches the Snappy writer to write to
|
||||
// w. This permits reusing a Writer rather than allocating a new one.
|
||||
func (w *Writer) Reset(writer io.Writer) {
|
||||
w.w = writer
|
||||
w.err = nil
|
||||
if w.ibuf != nil {
|
||||
w.ibuf = w.ibuf[:0]
|
||||
}
|
||||
w.wroteStreamHeader = false
|
||||
}
|
||||
|
||||
// Write satisfies the io.Writer interface.
|
||||
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
|
||||
if w.ibuf == nil {
|
||||
// Do not buffer incoming bytes. This does not perform or compress well
|
||||
// if the caller of Writer.Write writes many small slices. This
|
||||
// behavior is therefore deprecated, but still supported for backwards
|
||||
// compatibility with code that doesn't explicitly Flush or Close.
|
||||
return w.write(p)
|
||||
}
|
||||
|
||||
// The remainder of this method is based on bufio.Writer.Write from the
|
||||
// standard library.
|
||||
|
||||
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
|
||||
var n int
|
||||
if len(w.ibuf) == 0 {
|
||||
// Large write, empty buffer.
|
||||
// Write directly from p to avoid copy.
|
||||
n, _ = w.write(p)
|
||||
} else {
|
||||
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
|
||||
w.ibuf = w.ibuf[:len(w.ibuf)+n]
|
||||
w.Flush()
|
||||
}
|
||||
nRet += n
|
||||
p = p[n:]
|
||||
}
|
||||
if w.err != nil {
|
||||
return nRet, w.err
|
||||
}
|
||||
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
|
||||
w.ibuf = w.ibuf[:len(w.ibuf)+n]
|
||||
nRet += n
|
||||
return nRet, nil
|
||||
}
|
||||
|
||||
func (w *Writer) write(p []byte) (nRet int, errRet error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
for len(p) > 0 {
|
||||
obufStart := len(magicChunk)
|
||||
if !w.wroteStreamHeader {
|
||||
w.wroteStreamHeader = true
|
||||
copy(w.obuf, magicChunk)
|
||||
obufStart = 0
|
||||
}
|
||||
|
||||
var uncompressed []byte
|
||||
if len(p) > maxBlockSize {
|
||||
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
|
||||
} else {
|
||||
uncompressed, p = p, nil
|
||||
}
|
||||
checksum := crc(uncompressed)
|
||||
|
||||
// Compress the buffer, discarding the result if the improvement
|
||||
// isn't at least 12.5%.
|
||||
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
|
||||
chunkType := uint8(chunkTypeCompressedData)
|
||||
chunkLen := 4 + len(compressed)
|
||||
obufEnd := obufHeaderLen + len(compressed)
|
||||
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
|
||||
chunkType = chunkTypeUncompressedData
|
||||
chunkLen = 4 + len(uncompressed)
|
||||
obufEnd = obufHeaderLen
|
||||
}
|
||||
|
||||
// Fill in the per-chunk header that comes before the body.
|
||||
w.obuf[len(magicChunk)+0] = chunkType
|
||||
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
|
||||
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
|
||||
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
|
||||
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
|
||||
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
|
||||
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
|
||||
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
|
||||
|
||||
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
|
||||
w.err = err
|
||||
return nRet, err
|
||||
}
|
||||
if chunkType == chunkTypeUncompressedData {
|
||||
if _, err := w.w.Write(uncompressed); err != nil {
|
||||
w.err = err
|
||||
return nRet, err
|
||||
}
|
||||
}
|
||||
nRet += len(uncompressed)
|
||||
}
|
||||
return nRet, nil
|
||||
}
|
||||
|
||||
// Flush flushes the Writer to its underlying io.Writer.
|
||||
func (w *Writer) Flush() error {
|
||||
if w.err != nil {
|
||||
return w.err
|
||||
}
|
||||
if len(w.ibuf) == 0 {
|
||||
return nil
|
||||
}
|
||||
w.write(w.ibuf)
|
||||
w.ibuf = w.ibuf[:0]
|
||||
return w.err
|
||||
}
|
||||
|
||||
// Close calls Flush and then closes the Writer.
|
||||
func (w *Writer) Close() error {
|
||||
w.Flush()
|
||||
ret := w.err
|
||||
if w.err == nil {
|
||||
w.err = errClosed
|
||||
}
|
||||
return ret
|
||||
}
|
||||
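A minimal sketch tying the Writer above to the Reader from decode.go: MaxEncodedLen sizes the destination for the block API, and NewBufferedWriter plus Close produce a framed stream that NewReader can consume. The main wrapper and sample data are illustrative.

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"

    "github.com/golang/snappy"
)

func main() {
    src := bytes.Repeat([]byte("proxy "), 1000)

    // Block API: pre-sizing dst with MaxEncodedLen (roughly 32 + n + n/6,
    // per the comment above) lets Encode avoid an internal allocation.
    dst := make([]byte, snappy.MaxEncodedLen(len(src)))
    dst = snappy.Encode(dst, src)
    fmt.Println(len(src), "->", len(dst), "bytes")

    // Stream API: the buffered Writer needs Close (or Flush) to emit the
    // final chunk; NewReader undoes the framing.
    var framed bytes.Buffer
    w := snappy.NewBufferedWriter(&framed)
    if _, err := w.Write(src); err != nil {
        panic(err)
    }
    if err := w.Close(); err != nil {
        panic(err)
    }

    back, err := ioutil.ReadAll(snappy.NewReader(&framed))
    if err != nil {
        panic(err)
    }
    fmt.Println("round trip ok:", bytes.Equal(src, back))
}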
29
vendor/github.com/golang/snappy/encode_amd64.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

package snappy

// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int

// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int

// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int

// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)
730
vendor/github.com/golang/snappy/encode_amd64.s
generated
vendored
Normal file
@ -0,0 +1,730 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
|
||||
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
|
||||
// https://github.com/golang/snappy/issues/29
|
||||
//
|
||||
// As a workaround, the package was built with a known good assembler, and
|
||||
// those instructions were disassembled by "objdump -d" to yield the
|
||||
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||
// style comments, in AT&T asm syntax. Note that rsp here is a physical
|
||||
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
|
||||
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
|
||||
// fine on Go 1.6.
|
||||
|
||||
// The asm code generally follows the pure Go code in encode_other.go, except
|
||||
// where marked with a "!!!".
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func emitLiteral(dst, lit []byte) int
|
||||
//
|
||||
// All local variables fit into registers. The register allocation:
|
||||
// - AX len(lit)
|
||||
// - BX n
|
||||
// - DX return value
|
||||
// - DI &dst[i]
|
||||
// - R10 &lit[0]
|
||||
//
|
||||
// The 24 bytes of stack space is to call runtime·memmove.
|
||||
//
|
||||
// The unusual register allocation of local variables, such as R10 for the
|
||||
// source pointer, matches the allocation used at the call site in encodeBlock,
|
||||
// which makes it easier to manually inline this function.
|
||||
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
|
||||
MOVQ dst_base+0(FP), DI
|
||||
MOVQ lit_base+24(FP), R10
|
||||
MOVQ lit_len+32(FP), AX
|
||||
MOVQ AX, DX
|
||||
MOVL AX, BX
|
||||
SUBL $1, BX
|
||||
|
||||
CMPL BX, $60
|
||||
JLT oneByte
|
||||
CMPL BX, $256
|
||||
JLT twoBytes
|
||||
|
||||
threeBytes:
|
||||
MOVB $0xf4, 0(DI)
|
||||
MOVW BX, 1(DI)
|
||||
ADDQ $3, DI
|
||||
ADDQ $3, DX
|
||||
JMP memmove
|
||||
|
||||
twoBytes:
|
||||
MOVB $0xf0, 0(DI)
|
||||
MOVB BX, 1(DI)
|
||||
ADDQ $2, DI
|
||||
ADDQ $2, DX
|
||||
JMP memmove
|
||||
|
||||
oneByte:
|
||||
SHLB $2, BX
|
||||
MOVB BX, 0(DI)
|
||||
ADDQ $1, DI
|
||||
ADDQ $1, DX
|
||||
|
||||
memmove:
|
||||
MOVQ DX, ret+48(FP)
|
||||
|
||||
// copy(dst[i:], lit)
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
|
||||
// DI, R10 and AX as arguments.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ R10, 8(SP)
|
||||
MOVQ AX, 16(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
RET
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func emitCopy(dst []byte, offset, length int) int
|
||||
//
|
||||
// All local variables fit into registers. The register allocation:
|
||||
// - AX length
|
||||
// - SI &dst[0]
|
||||
// - DI &dst[i]
|
||||
// - R11 offset
|
||||
//
|
||||
// The unusual register allocation of local variables, such as R11 for the
|
||||
// offset, matches the allocation used at the call site in encodeBlock, which
|
||||
// makes it easier to manually inline this function.
|
||||
TEXT ·emitCopy(SB), NOSPLIT, $0-48
|
||||
MOVQ dst_base+0(FP), DI
|
||||
MOVQ DI, SI
|
||||
MOVQ offset+24(FP), R11
|
||||
MOVQ length+32(FP), AX
|
||||
|
||||
loop0:
|
||||
// for length >= 68 { etc }
|
||||
CMPL AX, $68
|
||||
JLT step1
|
||||
|
||||
// Emit a length 64 copy, encoded as 3 bytes.
|
||||
MOVB $0xfe, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $64, AX
|
||||
JMP loop0
|
||||
|
||||
step1:
|
||||
// if length > 64 { etc }
|
||||
CMPL AX, $64
|
||||
JLE step2
|
||||
|
||||
// Emit a length 60 copy, encoded as 3 bytes.
|
||||
MOVB $0xee, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $60, AX
|
||||
|
||||
step2:
|
||||
// if length >= 12 || offset >= 2048 { goto step3 }
|
||||
CMPL AX, $12
|
||||
JGE step3
|
||||
CMPL R11, $2048
|
||||
JGE step3
|
||||
|
||||
// Emit the remaining copy, encoded as 2 bytes.
|
||||
MOVB R11, 1(DI)
|
||||
SHRL $8, R11
|
||||
SHLB $5, R11
|
||||
SUBB $4, AX
|
||||
SHLB $2, AX
|
||||
ORB AX, R11
|
||||
ORB $1, R11
|
||||
MOVB R11, 0(DI)
|
||||
ADDQ $2, DI
|
||||
|
||||
// Return the number of bytes written.
|
||||
SUBQ SI, DI
|
||||
MOVQ DI, ret+40(FP)
|
||||
RET
|
||||
|
||||
step3:
|
||||
// Emit the remaining copy, encoded as 3 bytes.
|
||||
SUBL $1, AX
|
||||
SHLB $2, AX
|
||||
ORB $2, AX
|
||||
MOVB AX, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
|
||||
// Return the number of bytes written.
|
||||
SUBQ SI, DI
|
||||
MOVQ DI, ret+40(FP)
|
||||
RET
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func extendMatch(src []byte, i, j int) int
|
||||
//
|
||||
// All local variables fit into registers. The register allocation:
|
||||
// - DX &src[0]
|
||||
// - SI &src[j]
|
||||
// - R13 &src[len(src) - 8]
|
||||
// - R14 &src[len(src)]
|
||||
// - R15 &src[i]
|
||||
//
|
||||
// The unusual register allocation of local variables, such as R15 for a source
|
||||
// pointer, matches the allocation used at the call site in encodeBlock, which
|
||||
// makes it easier to manually inline this function.
|
||||
TEXT ·extendMatch(SB), NOSPLIT, $0-48
|
||||
MOVQ src_base+0(FP), DX
|
||||
MOVQ src_len+8(FP), R14
|
||||
MOVQ i+24(FP), R15
|
||||
MOVQ j+32(FP), SI
|
||||
ADDQ DX, R14
|
||||
ADDQ DX, R15
|
||||
ADDQ DX, SI
|
||||
MOVQ R14, R13
|
||||
SUBQ $8, R13
|
||||
|
||||
cmp8:
|
||||
// As long as we are 8 or more bytes before the end of src, we can load and
|
||||
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||
CMPQ SI, R13
|
||||
JA cmp1
|
||||
MOVQ (R15), AX
|
||||
MOVQ (SI), BX
|
||||
CMPQ AX, BX
|
||||
JNE bsf
|
||||
ADDQ $8, R15
|
||||
ADDQ $8, SI
|
||||
JMP cmp8
|
||||
|
||||
bsf:
|
||||
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||
// the index of the first byte that differs. The BSF instruction finds the
|
||||
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||
// the shift by 3 converts a bit index to a byte index.
|
||||
XORQ AX, BX
|
||||
BSFQ BX, BX
|
||||
SHRQ $3, BX
|
||||
ADDQ BX, SI
|
||||
|
||||
// Convert from &src[ret] to ret.
|
||||
SUBQ DX, SI
|
||||
MOVQ SI, ret+40(FP)
|
||||
RET
|
||||
|
||||
cmp1:
|
||||
// In src's tail, compare 1 byte at a time.
|
||||
CMPQ SI, R14
|
||||
JAE extendMatchEnd
|
||||
MOVB (R15), AX
|
||||
MOVB (SI), BX
|
||||
CMPB AX, BX
|
||||
JNE extendMatchEnd
|
||||
ADDQ $1, R15
|
||||
ADDQ $1, SI
|
||||
JMP cmp1
|
||||
|
||||
extendMatchEnd:
|
||||
// Convert from &src[ret] to ret.
|
||||
SUBQ DX, SI
|
||||
MOVQ SI, ret+40(FP)
|
||||
RET
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func encodeBlock(dst, src []byte) (d int)
|
||||
//
|
||||
// All local variables fit into registers, other than "var table". The register
|
||||
// allocation:
|
||||
// - AX . .
|
||||
// - BX . .
|
||||
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
|
||||
// - DX 64 &src[0], tableSize
|
||||
// - SI 72 &src[s]
|
||||
// - DI 80 &dst[d]
|
||||
// - R9 88 sLimit
|
||||
// - R10 . &src[nextEmit]
|
||||
// - R11 96 prevHash, currHash, nextHash, offset
|
||||
// - R12 104 &src[base], skip
|
||||
// - R13 . &src[nextS], &src[len(src) - 8]
|
||||
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
|
||||
// - R15 112 candidate
|
||||
//
|
||||
// The second column (56, 64, etc) is the stack offset to spill the registers
|
||||
// when calling other functions. We could pack this slightly tighter, but it's
|
||||
// simpler to have a dedicated spill map independent of the function called.
|
||||
//
|
||||
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
|
||||
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
|
||||
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
|
||||
TEXT ·encodeBlock(SB), 0, $32888-56
|
||||
MOVQ dst_base+0(FP), DI
|
||||
MOVQ src_base+24(FP), SI
|
||||
MOVQ src_len+32(FP), R14
|
||||
|
||||
// shift, tableSize := uint32(32-8), 1<<8
|
||||
MOVQ $24, CX
|
||||
MOVQ $256, DX
|
||||
|
||||
calcShift:
|
||||
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
|
||||
// shift--
|
||||
// }
|
||||
CMPQ DX, $16384
|
||||
JGE varTable
|
||||
CMPQ DX, R14
|
||||
JGE varTable
|
||||
SUBQ $1, CX
|
||||
SHLQ $1, DX
|
||||
JMP calcShift
|
||||
|
||||
varTable:
|
||||
// var table [maxTableSize]uint16
|
||||
//
|
||||
// In the asm code, unlike the Go code, we can zero-initialize only the
|
||||
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
|
||||
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
|
||||
// 2048 writes that would zero-initialize all of table's 32768 bytes.
|
||||
SHRQ $3, DX
|
||||
LEAQ table-32768(SP), BX
|
||||
PXOR X0, X0
|
||||
|
||||
memclr:
|
||||
MOVOU X0, 0(BX)
|
||||
ADDQ $16, BX
|
||||
SUBQ $1, DX
|
||||
JNZ memclr
|
||||
|
||||
// !!! DX = &src[0]
|
||||
MOVQ SI, DX
|
||||
|
||||
// sLimit := len(src) - inputMargin
|
||||
MOVQ R14, R9
|
||||
SUBQ $15, R9
|
||||
|
||||
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
|
||||
// change for the rest of the function.
|
||||
MOVQ CX, 56(SP)
|
||||
MOVQ DX, 64(SP)
|
||||
MOVQ R9, 88(SP)
|
||||
|
||||
// nextEmit := 0
|
||||
MOVQ DX, R10
|
||||
|
||||
// s := 1
|
||||
ADDQ $1, SI
|
||||
|
||||
// nextHash := hash(load32(src, s), shift)
|
||||
MOVL 0(SI), R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
outer:
|
||||
// for { etc }
|
||||
|
||||
// skip := 32
|
||||
MOVQ $32, R12
|
||||
|
||||
// nextS := s
|
||||
MOVQ SI, R13
|
||||
|
||||
// candidate := 0
|
||||
MOVQ $0, R15
|
||||
|
||||
inner0:
|
||||
// for { etc }
|
||||
|
||||
// s := nextS
|
||||
MOVQ R13, SI
|
||||
|
||||
// bytesBetweenHashLookups := skip >> 5
|
||||
MOVQ R12, R14
|
||||
SHRQ $5, R14
|
||||
|
||||
// nextS = s + bytesBetweenHashLookups
|
||||
ADDQ R14, R13
|
||||
|
||||
// skip += bytesBetweenHashLookups
|
||||
ADDQ R14, R12
|
||||
|
||||
// if nextS > sLimit { goto emitRemainder }
|
||||
MOVQ R13, AX
|
||||
SUBQ DX, AX
|
||||
CMPQ AX, R9
|
||||
JA emitRemainder
|
||||
|
||||
// candidate = int(table[nextHash])
|
||||
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
|
||||
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||
BYTE $0x4e
|
||||
BYTE $0x0f
|
||||
BYTE $0xb7
|
||||
BYTE $0x7c
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// table[nextHash] = uint16(s)
|
||||
MOVQ SI, AX
|
||||
SUBQ DX, AX
|
||||
|
||||
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||
BYTE $0x66
|
||||
BYTE $0x42
|
||||
BYTE $0x89
|
||||
BYTE $0x44
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// nextHash = hash(load32(src, nextS), shift)
|
||||
MOVL 0(R13), R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// if load32(src, s) != load32(src, candidate) { continue } break
|
||||
MOVL 0(SI), AX
|
||||
MOVL (DX)(R15*1), BX
|
||||
CMPL AX, BX
|
||||
JNE inner0
|
||||
|
||||
fourByteMatch:
|
||||
// As per the encode_other.go code:
|
||||
//
|
||||
// A 4-byte match has been found. We'll later see etc.
|
||||
|
||||
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
|
||||
// on inputMargin in encode.go.
|
||||
MOVQ SI, AX
|
||||
SUBQ R10, AX
|
||||
CMPQ AX, $16
|
||||
JLE emitLiteralFastPath
|
||||
|
||||
// ----------------------------------------
|
||||
// Begin inline of the emitLiteral call.
|
||||
//
|
||||
// d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||
|
||||
MOVL AX, BX
|
||||
SUBL $1, BX
|
||||
|
||||
CMPL BX, $60
|
||||
JLT inlineEmitLiteralOneByte
|
||||
CMPL BX, $256
|
||||
JLT inlineEmitLiteralTwoBytes
|
||||
|
||||
inlineEmitLiteralThreeBytes:
|
||||
MOVB $0xf4, 0(DI)
|
||||
MOVW BX, 1(DI)
|
||||
ADDQ $3, DI
|
||||
JMP inlineEmitLiteralMemmove
|
||||
|
||||
inlineEmitLiteralTwoBytes:
|
||||
MOVB $0xf0, 0(DI)
|
||||
MOVB BX, 1(DI)
|
||||
ADDQ $2, DI
|
||||
JMP inlineEmitLiteralMemmove
|
||||
|
||||
inlineEmitLiteralOneByte:
|
||||
SHLB $2, BX
|
||||
MOVB BX, 0(DI)
|
||||
ADDQ $1, DI
|
||||
|
||||
inlineEmitLiteralMemmove:
|
||||
// Spill local variables (registers) onto the stack; call; unspill.
|
||||
//
|
||||
// copy(dst[i:], lit)
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
|
||||
// DI, R10 and AX as arguments.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ R10, 8(SP)
|
||||
MOVQ AX, 16(SP)
|
||||
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
|
||||
MOVQ SI, 72(SP)
|
||||
MOVQ DI, 80(SP)
|
||||
MOVQ R15, 112(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
MOVQ 56(SP), CX
|
||||
MOVQ 64(SP), DX
|
||||
MOVQ 72(SP), SI
|
||||
MOVQ 80(SP), DI
|
||||
MOVQ 88(SP), R9
|
||||
MOVQ 112(SP), R15
|
||||
JMP inner1
|
||||
|
||||
inlineEmitLiteralEnd:
|
||||
// End inline of the emitLiteral call.
|
||||
// ----------------------------------------
|
||||
|
||||
emitLiteralFastPath:
|
||||
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
|
||||
MOVB AX, BX
|
||||
SUBB $1, BX
|
||||
SHLB $2, BX
|
||||
MOVB BX, (DI)
|
||||
ADDQ $1, DI
|
||||
|
||||
// !!! Implement the copy from lit to dst as a 16-byte load and store.
|
||||
// (Encode's documentation says that dst and src must not overlap.)
|
||||
//
|
||||
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
|
||||
// OK. Subsequent iterations will fix up the overrun.
|
||||
//
|
||||
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
|
||||
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||
// effective on architectures that are fussier about alignment.
|
||||
MOVOU 0(R10), X0
|
||||
MOVOU X0, 0(DI)
|
||||
ADDQ AX, DI
|
||||
|
||||
inner1:
|
||||
// for { etc }
|
||||
|
||||
// base := s
|
||||
MOVQ SI, R12
|
||||
|
||||
// !!! offset := base - candidate
|
||||
MOVQ R12, R11
|
||||
SUBQ R15, R11
|
||||
SUBQ DX, R11
|
||||
|
||||
// ----------------------------------------
|
||||
// Begin inline of the extendMatch call.
|
||||
//
|
||||
// s = extendMatch(src, candidate+4, s+4)
|
||||
|
||||
// !!! R14 = &src[len(src)]
|
||||
MOVQ src_len+32(FP), R14
|
||||
ADDQ DX, R14
|
||||
|
||||
// !!! R13 = &src[len(src) - 8]
|
||||
MOVQ R14, R13
|
||||
SUBQ $8, R13
|
||||
|
||||
// !!! R15 = &src[candidate + 4]
|
||||
ADDQ $4, R15
|
||||
ADDQ DX, R15
|
||||
|
||||
// !!! s += 4
|
||||
ADDQ $4, SI
|
||||
|
||||
inlineExtendMatchCmp8:
|
||||
// As long as we are 8 or more bytes before the end of src, we can load and
|
||||
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||
CMPQ SI, R13
|
||||
JA inlineExtendMatchCmp1
|
||||
MOVQ (R15), AX
|
||||
MOVQ (SI), BX
|
||||
CMPQ AX, BX
|
||||
JNE inlineExtendMatchBSF
|
||||
ADDQ $8, R15
|
||||
ADDQ $8, SI
|
||||
JMP inlineExtendMatchCmp8
|
||||
|
||||
inlineExtendMatchBSF:
|
||||
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||
// the index of the first byte that differs. The BSF instruction finds the
|
||||
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||
// the shift by 3 converts a bit index to a byte index.
|
||||
XORQ AX, BX
|
||||
BSFQ BX, BX
|
||||
SHRQ $3, BX
|
||||
ADDQ BX, SI
|
||||
JMP inlineExtendMatchEnd
|
||||
|
||||
inlineExtendMatchCmp1:
|
||||
// In src's tail, compare 1 byte at a time.
|
||||
CMPQ SI, R14
|
||||
JAE inlineExtendMatchEnd
|
||||
MOVB (R15), AX
|
||||
MOVB (SI), BX
|
||||
CMPB AX, BX
|
||||
JNE inlineExtendMatchEnd
|
||||
ADDQ $1, R15
|
||||
ADDQ $1, SI
|
||||
JMP inlineExtendMatchCmp1
|
||||
|
||||
inlineExtendMatchEnd:
|
||||
// End inline of the extendMatch call.
|
||||
// ----------------------------------------
|
||||
|
||||
// ----------------------------------------
|
||||
// Begin inline of the emitCopy call.
|
||||
//
|
||||
// d += emitCopy(dst[d:], base-candidate, s-base)
|
||||
|
||||
// !!! length := s - base
|
||||
MOVQ SI, AX
|
||||
SUBQ R12, AX
|
||||
|
||||
inlineEmitCopyLoop0:
|
||||
// for length >= 68 { etc }
|
||||
CMPL AX, $68
|
||||
JLT inlineEmitCopyStep1
|
||||
|
||||
// Emit a length 64 copy, encoded as 3 bytes.
|
||||
MOVB $0xfe, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $64, AX
|
||||
JMP inlineEmitCopyLoop0
|
||||
|
||||
inlineEmitCopyStep1:
|
||||
// if length > 64 { etc }
|
||||
CMPL AX, $64
|
||||
JLE inlineEmitCopyStep2
|
||||
|
||||
// Emit a length 60 copy, encoded as 3 bytes.
|
||||
MOVB $0xee, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $60, AX
|
||||
|
||||
inlineEmitCopyStep2:
|
||||
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
|
||||
CMPL AX, $12
|
||||
JGE inlineEmitCopyStep3
|
||||
CMPL R11, $2048
|
||||
JGE inlineEmitCopyStep3
|
||||
|
||||
// Emit the remaining copy, encoded as 2 bytes.
|
||||
MOVB R11, 1(DI)
|
||||
SHRL $8, R11
|
||||
SHLB $5, R11
|
||||
SUBB $4, AX
|
||||
SHLB $2, AX
|
||||
ORB AX, R11
|
||||
ORB $1, R11
|
||||
MOVB R11, 0(DI)
|
||||
ADDQ $2, DI
|
||||
JMP inlineEmitCopyEnd
|
||||
|
||||
inlineEmitCopyStep3:
|
||||
// Emit the remaining copy, encoded as 3 bytes.
|
||||
SUBL $1, AX
|
||||
SHLB $2, AX
|
||||
ORB $2, AX
|
||||
MOVB AX, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
|
||||
inlineEmitCopyEnd:
|
||||
// End inline of the emitCopy call.
|
||||
// ----------------------------------------
|
||||
|
||||
// nextEmit = s
|
||||
MOVQ SI, R10
|
||||
|
||||
// if s >= sLimit { goto emitRemainder }
|
||||
MOVQ SI, AX
|
||||
SUBQ DX, AX
|
||||
CMPQ AX, R9
|
||||
JAE emitRemainder
|
||||
|
||||
// As per the encode_other.go code:
|
||||
//
|
||||
// We could immediately etc.
|
||||
|
||||
// x := load64(src, s-1)
|
||||
MOVQ -1(SI), R14
|
||||
|
||||
// prevHash := hash(uint32(x>>0), shift)
|
||||
MOVL R14, R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// table[prevHash] = uint16(s-1)
|
||||
MOVQ SI, AX
|
||||
SUBQ DX, AX
|
||||
SUBQ $1, AX
|
||||
|
||||
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||
BYTE $0x66
|
||||
BYTE $0x42
|
||||
BYTE $0x89
|
||||
BYTE $0x44
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// currHash := hash(uint32(x>>8), shift)
|
||||
SHRQ $8, R14
|
||||
MOVL R14, R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// candidate = int(table[currHash])
|
||||
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
|
||||
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||
BYTE $0x4e
|
||||
BYTE $0x0f
|
||||
BYTE $0xb7
|
||||
BYTE $0x7c
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// table[currHash] = uint16(s)
|
||||
ADDQ $1, AX
|
||||
|
||||
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||
BYTE $0x66
|
||||
BYTE $0x42
|
||||
BYTE $0x89
|
||||
BYTE $0x44
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// if uint32(x>>8) == load32(src, candidate) { continue }
|
||||
MOVL (DX)(R15*1), BX
|
||||
CMPL R14, BX
|
||||
JEQ inner1
|
||||
|
||||
// nextHash = hash(uint32(x>>16), shift)
|
||||
SHRQ $8, R14
|
||||
MOVL R14, R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// s++
|
||||
ADDQ $1, SI
|
||||
|
||||
// break out of the inner1 for loop, i.e. continue the outer loop.
|
||||
JMP outer
|
||||
|
||||
emitRemainder:
|
||||
// if nextEmit < len(src) { etc }
|
||||
MOVQ src_len+32(FP), AX
|
||||
ADDQ DX, AX
|
||||
CMPQ R10, AX
|
||||
JEQ encodeBlockEnd
|
||||
|
||||
// d += emitLiteral(dst[d:], src[nextEmit:])
|
||||
//
|
||||
// Push args.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||
MOVQ R10, 24(SP)
|
||||
SUBQ R10, AX
|
||||
MOVQ AX, 32(SP)
|
||||
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||
|
||||
// Spill local variables (registers) onto the stack; call; unspill.
|
||||
MOVQ DI, 80(SP)
|
||||
CALL ·emitLiteral(SB)
|
||||
MOVQ 80(SP), DI
|
||||
|
||||
// Finish the "d +=" part of "d += emitLiteral(etc)".
|
||||
ADDQ 48(SP), DI
|
||||
|
||||
encodeBlockEnd:
|
||||
MOVQ dst_base+0(FP), AX
|
||||
SUBQ AX, DI
|
||||
MOVQ DI, d+48(FP)
|
||||
RET
238
vendor/github.com/golang/snappy/encode_other.go
generated
vendored
Normal file
@ -0,0 +1,238 @@
|
||||
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !amd64 appengine !gc noasm
|
||||
|
||||
package snappy
|
||||
|
||||
func load32(b []byte, i int) uint32 {
|
||||
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
|
||||
}
|
||||
|
||||
func load64(b []byte, i int) uint64 {
|
||||
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
|
||||
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
|
||||
}
|
||||
|
||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||
//
|
||||
// It assumes that:
|
||||
// dst is long enough to hold the encoded bytes
|
||||
// 1 <= len(lit) && len(lit) <= 65536
|
||||
func emitLiteral(dst, lit []byte) int {
|
||||
i, n := 0, uint(len(lit)-1)
|
||||
switch {
|
||||
case n < 60:
|
||||
dst[0] = uint8(n)<<2 | tagLiteral
|
||||
i = 1
|
||||
case n < 1<<8:
|
||||
dst[0] = 60<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
i = 2
|
||||
default:
|
||||
dst[0] = 61<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
dst[2] = uint8(n >> 8)
|
||||
i = 3
|
||||
}
|
||||
return i + copy(dst[i:], lit)
|
||||
}
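// Illustrative sketch, not part of the original snappy source: a 100-byte
// literal takes the n < 1<<8 branch above, so emitLiteral writes a two-byte
// header, 0xf0 (60<<2 | tagLiteral) followed by 0x63 (n == 99), before the
// literal bytes themselves, for 102 bytes in total.
func exampleEmitLiteralHeader() []byte {
	lit := make([]byte, 100)
	dst := make([]byte, 2+len(lit))
	n := emitLiteral(dst, lit) // n == 102
	return dst[:n]             // dst[0] == 0xf0, dst[1] == 0x63
}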
|
||||
|
||||
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||
//
|
||||
// It assumes that:
|
||||
// dst is long enough to hold the encoded bytes
|
||||
// 1 <= offset && offset <= 65535
|
||||
// 4 <= length && length <= 65535
|
||||
func emitCopy(dst []byte, offset, length int) int {
|
||||
i := 0
|
||||
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
|
||||
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
|
||||
// length emitted down below is a little lower (at 60 = 64 - 4), because
|
||||
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
|
||||
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
|
||||
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
|
||||
// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
|
||||
// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
|
||||
// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
|
||||
for length >= 68 {
|
||||
// Emit a length 64 copy, encoded as 3 bytes.
|
||||
dst[i+0] = 63<<2 | tagCopy2
|
||||
dst[i+1] = uint8(offset)
|
||||
dst[i+2] = uint8(offset >> 8)
|
||||
i += 3
|
||||
length -= 64
|
||||
}
|
||||
if length > 64 {
|
||||
// Emit a length 60 copy, encoded as 3 bytes.
|
||||
dst[i+0] = 59<<2 | tagCopy2
|
||||
dst[i+1] = uint8(offset)
|
||||
dst[i+2] = uint8(offset >> 8)
|
||||
i += 3
|
||||
length -= 60
|
||||
}
|
||||
if length >= 12 || offset >= 2048 {
|
||||
// Emit the remaining copy, encoded as 3 bytes.
|
||||
dst[i+0] = uint8(length-1)<<2 | tagCopy2
|
||||
dst[i+1] = uint8(offset)
|
||||
dst[i+2] = uint8(offset >> 8)
|
||||
return i + 3
|
||||
}
|
||||
// Emit the remaining copy, encoded as 2 bytes.
|
||||
dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
|
||||
dst[i+1] = uint8(offset)
|
||||
return i + 2
|
||||
}
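// Illustrative sketch, not part of the original snappy source: the 64±4
// trade-off described above in action. A copy of length 67 at offset 10 is
// emitted as a length-60 tagCopy2 (3 bytes) plus a length-7 tagCopy1
// (2 bytes), i.e. {0xee, 0x0a, 0x00, 0x0d, 0x0a}, instead of a length-64 +
// length-3 pair that would need 3+3 bytes.
func exampleEmitCopySplit() []byte {
	dst := make([]byte, 8)
	n := emitCopy(dst, 10, 67) // n == 5
	return dst[:n]
}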
|
||||
|
||||
// extendMatch returns the largest k such that k <= len(src) and that
|
||||
// src[i:i+k-j] and src[j:k] have the same contents.
|
||||
//
|
||||
// It assumes that:
|
||||
// 0 <= i && i < j && j <= len(src)
|
||||
func extendMatch(src []byte, i, j int) int {
|
||||
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
|
||||
}
|
||||
return j
|
||||
}
|
||||
|
||||
func hash(u, shift uint32) uint32 {
|
||||
return (u * 0x1e35a7bd) >> shift
|
||||
}
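// Illustrative sketch, not part of the original snappy source: with the
// smallest table (tableSize == 1<<8, shift == 32-8), hash keeps only the top
// 8 bits of the 32-bit product, so every value it returns indexes safely into
// the table.
func exampleHashRange(u uint32) uint32 {
	return hash(u, 32-8) // always < 1<<8
}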
|
||||
|
||||
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||
func encodeBlock(dst, src []byte) (d int) {
|
||||
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
|
||||
// The table element type is uint16, as s < sLimit and sLimit < len(src)
|
||||
// and len(src) <= maxBlockSize and maxBlockSize == 65536.
|
||||
const (
|
||||
maxTableSize = 1 << 14
|
||||
// tableMask is redundant, but helps the compiler eliminate bounds
|
||||
// checks.
|
||||
tableMask = maxTableSize - 1
|
||||
)
|
||||
shift := uint32(32 - 8)
|
||||
for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
|
||||
shift--
|
||||
}
|
||||
// In Go, all array elements are zero-initialized, so there is no advantage
|
||||
// to a smaller tableSize per se. However, it matches the C++ algorithm,
|
||||
// and in the asm versions of this code, we can get away with zeroing only
|
||||
// the first tableSize elements.
|
||||
var table [maxTableSize]uint16
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := len(src) - inputMargin
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := 0
|
||||
|
||||
// The encoded form must start with a literal, as there are no previous
|
||||
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||
s := 1
|
||||
nextHash := hash(load32(src, s), shift)
|
||||
|
||||
for {
|
||||
// Copied from the C++ snappy implementation:
|
||||
//
|
||||
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
||||
// found, start looking only at every other byte. If 32 more bytes are
|
||||
// scanned (or skipped), look at every third byte, etc.. When a match
|
||||
// is found, immediately go back to looking at every byte. This is a
|
||||
// small loss (~5% performance, ~0.1% density) for compressible data
|
||||
// due to more bookkeeping, but for non-compressible data (such as
|
||||
// JPEG) it's a huge win since the compressor quickly "realizes" the
|
||||
// data is incompressible and doesn't bother looking for matches
|
||||
// everywhere.
|
||||
//
|
||||
// The "skip" variable keeps track of how many bytes there are since
|
||||
// the last match; dividing it by 32 (ie. right-shifting by five) gives
|
||||
// the number of bytes to move ahead for each iteration.
|
||||
skip := 32
|
||||
|
||||
nextS := s
|
||||
candidate := 0
|
||||
for {
|
||||
s = nextS
|
||||
bytesBetweenHashLookups := skip >> 5
|
||||
nextS = s + bytesBetweenHashLookups
|
||||
skip += bytesBetweenHashLookups
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
candidate = int(table[nextHash&tableMask])
|
||||
table[nextHash&tableMask] = uint16(s)
|
||||
nextHash = hash(load32(src, nextS), shift)
|
||||
if load32(src, s) == load32(src, candidate) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||
|
||||
// Call emitCopy, and then see if another emitCopy could be our next
|
||||
// move. Repeat until we find no match for the input immediately after
|
||||
// what was consumed by the last emitCopy call.
|
||||
//
|
||||
// If we exit this loop normally then we need to call emitLiteral next,
|
||||
// though we don't yet know how big the literal will be. We handle that
|
||||
// by proceeding to the next iteration of the main loop. We also can
|
||||
// exit this loop via goto if we get close to exhausting the input.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
base := s
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
//
|
||||
// This is an inlined version of:
|
||||
// s = extendMatch(src, candidate+4, s+4)
|
||||
s += 4
|
||||
for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
|
||||
}
|
||||
|
||||
d += emitCopy(dst[d:], base-candidate, s-base)
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-1 and at s. If
|
||||
// another emitCopy is not our next move, also calculate nextHash
|
||||
// at s+1. At least on GOARCH=amd64, these three hash calculations
|
||||
// are faster as one load64 call (with some shifts) instead of
|
||||
// three load32 calls.
|
||||
x := load64(src, s-1)
|
||||
prevHash := hash(uint32(x>>0), shift)
|
||||
table[prevHash&tableMask] = uint16(s - 1)
|
||||
currHash := hash(uint32(x>>8), shift)
|
||||
candidate = int(table[currHash&tableMask])
|
||||
table[currHash&tableMask] = uint16(s)
|
||||
if uint32(x>>8) != load32(src, candidate) {
|
||||
nextHash = hash(uint32(x>>16), shift)
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if nextEmit < len(src) {
|
||||
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||
}
|
||||
return d
|
||||
}
87
vendor/github.com/golang/snappy/snappy.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package snappy implements the snappy block-based compression format.
|
||||
// It aims for very high speeds and reasonable compression.
|
||||
//
|
||||
// The C++ snappy implementation is at https://github.com/google/snappy
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"hash/crc32"
|
||||
)
|
||||
|
||||
/*
|
||||
Each encoded block begins with the varint-encoded length of the decoded data,
|
||||
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
|
||||
first byte of each chunk is broken into its 2 least and 6 most significant bits
|
||||
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
|
||||
Zero means a literal tag. All other values mean a copy tag.
|
||||
|
||||
For literal tags:
|
||||
- If m < 60, the next 1 + m bytes are literal bytes.
|
||||
- Otherwise, let n be the little-endian unsigned integer denoted by the next
|
||||
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
|
||||
|
||||
For copy tags, length bytes are copied from offset bytes ago, in the style of
|
||||
Lempel-Ziv compression algorithms. In particular:
|
||||
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
|
||||
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
|
||||
of the offset. The next byte is bits 0-7 of the offset.
|
||||
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
|
||||
The length is 1 + m. The offset is the little-endian unsigned integer
|
||||
denoted by the next 2 bytes.
|
||||
- For l == 3, this tag is a legacy format that is no longer issued by most
|
||||
encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
|
||||
[1, 65). The length is 1 + m. The offset is the little-endian unsigned
|
||||
integer denoted by the next 4 bytes.
|
||||
*/
|
||||
const (
|
||||
tagLiteral = 0x00
|
||||
tagCopy1 = 0x01
|
||||
tagCopy2 = 0x02
|
||||
tagCopy4 = 0x03
|
||||
)
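// Illustrative sketch, not part of the original snappy source: how the first
// byte of a chunk splits into the l and m fields described in the comment
// block above. For 0xf0 (the header emitLiteral writes when the literal
// length needs one extra byte) this returns l == tagLiteral and m == 60.
func exampleSplitTagByte(first byte) (l, m byte) {
	l = first & 0x03 // 2-bit chunk tag: tagLiteral, tagCopy1, tagCopy2 or tagCopy4
	m = first >> 2   // 6-bit field, interpreted according to the tag
	return l, m
}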
|
||||
|
||||
const (
|
||||
checksumSize = 4
|
||||
chunkHeaderSize = 4
|
||||
magicChunk = "\xff\x06\x00\x00" + magicBody
|
||||
magicBody = "sNaPpY"
|
||||
|
||||
// maxBlockSize is the maximum size of the input to encodeBlock. It is not
|
||||
// part of the wire format per se, but some parts of the encoder assume
|
||||
// that an offset fits into a uint16.
|
||||
//
|
||||
// Also, for the framing format (Writer type instead of Encode function),
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt says
|
||||
// that "the uncompressed data in a chunk must be no longer than 65536
|
||||
// bytes".
|
||||
maxBlockSize = 65536
|
||||
|
||||
// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
|
||||
// hard coded to be a const instead of a variable, so that obufLen can also
|
||||
// be a const. Their equivalence is confirmed by
|
||||
// TestMaxEncodedLenOfMaxBlockSize.
|
||||
maxEncodedLenOfMaxBlockSize = 76490
|
||||
|
||||
obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
|
||||
obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
|
||||
)
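// Sanity check (not part of the original source): assuming the worst-case
// bound used by MaxEncodedLen elsewhere in this package, 32 + n + n/6, a full
// block gives 32 + 65536 + 10922 = 76490, which matches
// maxEncodedLenOfMaxBlockSize above.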
|
||||
|
||||
const (
|
||||
chunkTypeCompressedData = 0x00
|
||||
chunkTypeUncompressedData = 0x01
|
||||
chunkTypePadding = 0xfe
|
||||
chunkTypeStreamIdentifier = 0xff
|
||||
)
|
||||
|
||||
var crcTable = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
||||
// crc implements the checksum specified in section 3 of
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
func crc(b []byte) uint32 {
|
||||
c := crc32.Update(0, crcTable, b)
|
||||
return uint32(c>>15|c<<17) + 0xa282ead8
|
||||
}
23
vendor/github.com/pkg/errors/LICENSE
generated
vendored
Executable file
@ -0,0 +1,23 @@
|
||||
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52
vendor/github.com/pkg/errors/README.md
generated
vendored
Executable file
@ -0,0 +1,52 @@
|
||||
# errors [](https://travis-ci.org/pkg/errors) [](https://ci.appveyor.com/project/davecheney/errors/branch/master) [](http://godoc.org/github.com/pkg/errors) [](https://goreportcard.com/report/github.com/pkg/errors)
|
||||
|
||||
Package errors provides simple error handling primitives.
|
||||
|
||||
`go get github.com/pkg/errors`
|
||||
|
||||
The traditional error handling idiom in Go is roughly akin to
|
||||
```go
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
|
||||
|
||||
## Adding context to an error
|
||||
|
||||
The errors.Wrap function returns a new error that adds context to the original error. For example
|
||||
```go
|
||||
_, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "read failed")
|
||||
}
|
||||
```
|
||||
## Retrieving the cause of an error
|
||||
|
||||
Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
|
||||
```go
|
||||
type causer interface {
|
||||
Cause() error
|
||||
}
|
||||
```
|
||||
`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
|
||||
```go
|
||||
switch err := errors.Cause(err).(type) {
|
||||
case *MyError:
|
||||
// handle specifically
|
||||
default:
|
||||
// unknown error
|
||||
}
|
||||
```
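Putting the two together, a minimal end-to-end sketch (the config path is just an example input; only `Wrap` and `Cause` come from this package):
```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func readConfig(path string) error {
	if _, err := os.Stat(path); err != nil {
		return errors.Wrap(err, "read config")
	}
	return nil
}

func main() {
	err := readConfig("/no/such/file")
	fmt.Println(err)               // read config: stat /no/such/file: no such file or directory
	fmt.Println(errors.Cause(err)) // stat /no/such/file: no such file or directory
}
```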
|
||||
|
||||
[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
|
||||
|
||||
## Contributing
|
||||
|
||||
We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
|
||||
|
||||
Before proposing a change, please discuss your change by raising an issue.
|
||||
|
||||
## Licence
|
||||
|
||||
BSD-2-Clause
32
vendor/github.com/pkg/errors/appveyor.yml
generated
vendored
Executable file
@ -0,0 +1,32 @@
|
||||
version: build-{build}.{branch}
|
||||
|
||||
clone_folder: C:\gopath\src\github.com\pkg\errors
|
||||
shallow_clone: true # for startup speed
|
||||
|
||||
environment:
|
||||
GOPATH: C:\gopath
|
||||
|
||||
platform:
|
||||
- x64
|
||||
|
||||
# http://www.appveyor.com/docs/installed-software
|
||||
install:
|
||||
# some helpful output for debugging builds
|
||||
- go version
|
||||
- go env
|
||||
# pre-installed MinGW at C:\MinGW is 32bit only
|
||||
# but MSYS2 at C:\msys64 has mingw64
|
||||
- set PATH=C:\msys64\mingw64\bin;%PATH%
|
||||
- gcc --version
|
||||
- g++ --version
|
||||
|
||||
build_script:
|
||||
- go install -v ./...
|
||||
|
||||
test_script:
|
||||
- set PATH=C:\gopath\bin;%PATH%
|
||||
- go test -v ./...
|
||||
|
||||
#artifacts:
|
||||
# - path: '%GOPATH%\bin\*.exe'
|
||||
deploy: off
269
vendor/github.com/pkg/errors/errors.go
generated
vendored
Executable file
@ -0,0 +1,269 @@
|
||||
// Package errors provides simple error handling primitives.
|
||||
//
|
||||
// The traditional error handling idiom in Go is roughly akin to
|
||||
//
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// which applied recursively up the call stack results in error reports
|
||||
// without context or debugging information. The errors package allows
|
||||
// programmers to add context to the failure path in their code in a way
|
||||
// that does not destroy the original value of the error.
|
||||
//
|
||||
// Adding context to an error
|
||||
//
|
||||
// The errors.Wrap function returns a new error that adds context to the
|
||||
// original error by recording a stack trace at the point Wrap is called,
|
||||
// and the supplied message. For example
|
||||
//
|
||||
// _, err := ioutil.ReadAll(r)
|
||||
// if err != nil {
|
||||
// return errors.Wrap(err, "read failed")
|
||||
// }
|
||||
//
|
||||
// If additional control is required the errors.WithStack and errors.WithMessage
|
||||
// functions destructure errors.Wrap into its component operations of annotating
|
||||
// an error with a stack trace and a message, respectively.
|
||||
//
|
||||
// Retrieving the cause of an error
|
||||
//
|
||||
// Using errors.Wrap constructs a stack of errors, adding context to the
|
||||
// preceding error. Depending on the nature of the error it may be necessary
|
||||
// to reverse the operation of errors.Wrap to retrieve the original error
|
||||
// for inspection. Any error value which implements this interface
|
||||
//
|
||||
// type causer interface {
|
||||
// Cause() error
|
||||
// }
|
||||
//
|
||||
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
|
||||
// the topmost error which does not implement causer, which is assumed to be
|
||||
// the original cause. For example:
|
||||
//
|
||||
// switch err := errors.Cause(err).(type) {
|
||||
// case *MyError:
|
||||
// // handle specifically
|
||||
// default:
|
||||
// // unknown error
|
||||
// }
|
||||
//
|
||||
// causer interface is not exported by this package, but is considered a part
|
||||
// of stable public API.
|
||||
//
|
||||
// Formatted printing of errors
|
||||
//
|
||||
// All error values returned from this package implement fmt.Formatter and can
|
||||
// be formatted by the fmt package. The following verbs are supported
|
||||
//
|
||||
// %s print the error. If the error has a Cause it will be
|
||||
// printed recursively
|
||||
// %v see %s
|
||||
// %+v extended format. Each Frame of the error's StackTrace will
|
||||
// be printed in detail.
|
||||
//
|
||||
// Retrieving the stack trace of an error or wrapper
|
||||
//
|
||||
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
|
||||
// invoked. This information can be retrieved with the following interface.
|
||||
//
|
||||
// type stackTracer interface {
|
||||
// StackTrace() errors.StackTrace
|
||||
// }
|
||||
//
|
||||
// Where errors.StackTrace is defined as
|
||||
//
|
||||
// type StackTrace []Frame
|
||||
//
|
||||
// The Frame type represents a call site in the stack trace. Frame supports
|
||||
// the fmt.Formatter interface that can be used for printing information about
|
||||
// the stack trace of this error. For example:
|
||||
//
|
||||
// if err, ok := err.(stackTracer); ok {
|
||||
// for _, f := range err.StackTrace() {
|
||||
// fmt.Printf("%+s:%d", f)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// stackTracer interface is not exported by this package, but is considered a part
|
||||
// of stable public API.
|
||||
//
|
||||
// See the documentation for Frame.Format for more details.
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// New returns an error with the supplied message.
|
||||
// New also records the stack trace at the point it was called.
|
||||
func New(message string) error {
|
||||
return &fundamental{
|
||||
msg: message,
|
||||
stack: callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// Errorf formats according to a format specifier and returns the string
|
||||
// as a value that satisfies error.
|
||||
// Errorf also records the stack trace at the point it was called.
|
||||
func Errorf(format string, args ...interface{}) error {
|
||||
return &fundamental{
|
||||
msg: fmt.Sprintf(format, args...),
|
||||
stack: callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// fundamental is an error that has a message and a stack, but no caller.
|
||||
type fundamental struct {
|
||||
msg string
|
||||
*stack
|
||||
}
|
||||
|
||||
func (f *fundamental) Error() string { return f.msg }
|
||||
|
||||
func (f *fundamental) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
if s.Flag('+') {
|
||||
io.WriteString(s, f.msg)
|
||||
f.stack.Format(s, verb)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case 's':
|
||||
io.WriteString(s, f.msg)
|
||||
case 'q':
|
||||
fmt.Fprintf(s, "%q", f.msg)
|
||||
}
|
||||
}
|
||||
|
||||
// WithStack annotates err with a stack trace at the point WithStack was called.
|
||||
// If err is nil, WithStack returns nil.
|
||||
func WithStack(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &withStack{
|
||||
err,
|
||||
callers(),
|
||||
}
|
||||
}
|
||||
|
||||
type withStack struct {
|
||||
error
|
||||
*stack
|
||||
}
|
||||
|
||||
func (w *withStack) Cause() error { return w.error }
|
||||
|
||||
func (w *withStack) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
if s.Flag('+') {
|
||||
fmt.Fprintf(s, "%+v", w.Cause())
|
||||
w.stack.Format(s, verb)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case 's':
|
||||
io.WriteString(s, w.Error())
|
||||
case 'q':
|
||||
fmt.Fprintf(s, "%q", w.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap returns an error annotating err with a stack trace
|
||||
// at the point Wrap is called, and the supplied message.
|
||||
// If err is nil, Wrap returns nil.
|
||||
func Wrap(err error, message string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
err = &withMessage{
|
||||
cause: err,
|
||||
msg: message,
|
||||
}
|
||||
return &withStack{
|
||||
err,
|
||||
callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapf returns an error annotating err with a stack trace
|
||||
// at the point Wrapf is called, and the format specifier.
|
||||
// If err is nil, Wrapf returns nil.
|
||||
func Wrapf(err error, format string, args ...interface{}) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
err = &withMessage{
|
||||
cause: err,
|
||||
msg: fmt.Sprintf(format, args...),
|
||||
}
|
||||
return &withStack{
|
||||
err,
|
||||
callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// WithMessage annotates err with a new message.
|
||||
// If err is nil, WithMessage returns nil.
|
||||
func WithMessage(err error, message string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &withMessage{
|
||||
cause: err,
|
||||
msg: message,
|
||||
}
|
||||
}
|
||||
|
||||
type withMessage struct {
|
||||
cause error
|
||||
msg string
|
||||
}
|
||||
|
||||
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
|
||||
func (w *withMessage) Cause() error { return w.cause }
|
||||
|
||||
func (w *withMessage) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
if s.Flag('+') {
|
||||
fmt.Fprintf(s, "%+v\n", w.Cause())
|
||||
io.WriteString(s, w.msg)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case 's', 'q':
|
||||
io.WriteString(s, w.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Cause returns the underlying cause of the error, if possible.
|
||||
// An error value has a cause if it implements the following
|
||||
// interface:
|
||||
//
|
||||
// type causer interface {
|
||||
// Cause() error
|
||||
// }
|
||||
//
|
||||
// If the error does not implement Cause, the original error will
|
||||
// be returned. If the error is nil, nil will be returned without further
|
||||
// investigation.
|
||||
func Cause(err error) error {
|
||||
type causer interface {
|
||||
Cause() error
|
||||
}
|
||||
|
||||
for err != nil {
|
||||
cause, ok := err.(causer)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
err = cause.Cause()
|
||||
}
|
||||
return err
|
||||
}
186
vendor/github.com/pkg/errors/stack.go
generated
vendored
Executable file
@ -0,0 +1,186 @@
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Frame represents a program counter inside a stack frame.
|
||||
type Frame uintptr
|
||||
|
||||
// pc returns the program counter for this frame;
|
||||
// multiple frames may have the same PC value.
|
||||
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
|
||||
|
||||
// file returns the full path to the file that contains the
|
||||
// function for this Frame's pc.
|
||||
func (f Frame) file() string {
|
||||
fn := runtime.FuncForPC(f.pc())
|
||||
if fn == nil {
|
||||
return "unknown"
|
||||
}
|
||||
file, _ := fn.FileLine(f.pc())
|
||||
return file
|
||||
}
|
||||
|
||||
// line returns the line number of source code of the
|
||||
// function for this Frame's pc.
|
||||
func (f Frame) line() int {
|
||||
fn := runtime.FuncForPC(f.pc())
|
||||
if fn == nil {
|
||||
return 0
|
||||
}
|
||||
_, line := fn.FileLine(f.pc())
|
||||
return line
|
||||
}
|
||||
|
||||
// Format formats the frame according to the fmt.Formatter interface.
|
||||
//
|
||||
// %s source file
|
||||
// %d source line
|
||||
// %n function name
|
||||
// %v equivalent to %s:%d
|
||||
//
|
||||
// Format accepts flags that alter the printing of some verbs, as follows:
|
||||
//
|
||||
// %+s path of source file relative to the compile time GOPATH
|
||||
// %+v equivalent to %+s:%d
|
||||
func (f Frame) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 's':
|
||||
switch {
|
||||
case s.Flag('+'):
|
||||
pc := f.pc()
|
||||
fn := runtime.FuncForPC(pc)
|
||||
if fn == nil {
|
||||
io.WriteString(s, "unknown")
|
||||
} else {
|
||||
file, _ := fn.FileLine(pc)
|
||||
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
|
||||
}
|
||||
default:
|
||||
io.WriteString(s, path.Base(f.file()))
|
||||
}
|
||||
case 'd':
|
||||
fmt.Fprintf(s, "%d", f.line())
|
||||
case 'n':
|
||||
name := runtime.FuncForPC(f.pc()).Name()
|
||||
io.WriteString(s, funcname(name))
|
||||
case 'v':
|
||||
f.Format(s, 's')
|
||||
io.WriteString(s, ":")
|
||||
f.Format(s, 'd')
|
||||
}
|
||||
}
|
||||
|
||||
// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
|
||||
type StackTrace []Frame
|
||||
|
||||
// Format formats the stack of Frames according to the fmt.Formatter interface.
|
||||
//
|
||||
// %s lists source files for each Frame in the stack
|
||||
// %v lists the source file and line number for each Frame in the stack
|
||||
//
|
||||
// Format accepts flags that alter the printing of some verbs, as follows:
|
||||
//
|
||||
// %+v Prints filename, function, and line number for each Frame in the stack.
|
||||
func (st StackTrace) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
switch {
|
||||
case s.Flag('+'):
|
||||
for _, f := range st {
|
||||
fmt.Fprintf(s, "\n%+v", f)
|
||||
}
|
||||
case s.Flag('#'):
|
||||
fmt.Fprintf(s, "%#v", []Frame(st))
|
||||
default:
|
||||
fmt.Fprintf(s, "%v", []Frame(st))
|
||||
}
|
||||
case 's':
|
||||
fmt.Fprintf(s, "%s", []Frame(st))
|
||||
}
|
||||
}
|
||||
|
||||
// stack represents a stack of program counters.
|
||||
type stack []uintptr
|
||||
|
||||
func (s *stack) Format(st fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
switch {
|
||||
case st.Flag('+'):
|
||||
for _, pc := range *s {
|
||||
f := Frame(pc)
|
||||
fmt.Fprintf(st, "\n%+v", f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stack) StackTrace() StackTrace {
|
||||
f := make([]Frame, len(*s))
|
||||
for i := 0; i < len(f); i++ {
|
||||
f[i] = Frame((*s)[i])
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func callers() *stack {
|
||||
const depth = 32
|
||||
var pcs [depth]uintptr
|
||||
n := runtime.Callers(3, pcs[:])
|
||||
var st stack = pcs[0:n]
|
||||
return &st
|
||||
}
|
||||
|
||||
// funcname removes the path prefix component of a function's name reported by func.Name().
|
||||
func funcname(name string) string {
|
||||
i := strings.LastIndex(name, "/")
|
||||
name = name[i+1:]
|
||||
i = strings.Index(name, ".")
|
||||
return name[i+1:]
|
||||
}
|
||||
|
||||
func trimGOPATH(name, file string) string {
|
||||
// Here we want to get the source file path relative to the compile time
|
||||
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
|
||||
// GOPATH at runtime, but we can infer the number of path segments in the
|
||||
// GOPATH. We note that fn.Name() returns the function name qualified by
|
||||
// the import path, which does not include the GOPATH. Thus we can trim
|
||||
// segments from the beginning of the file path until the number of path
|
||||
// separators remaining is one more than the number of path separators in
|
||||
// the function name. For example, given:
|
||||
//
|
||||
// GOPATH /home/user
|
||||
// file /home/user/src/pkg/sub/file.go
|
||||
// fn.Name() pkg/sub.Type.Method
|
||||
//
|
||||
// We want to produce:
|
||||
//
|
||||
// pkg/sub/file.go
|
||||
//
|
||||
// From this we can easily see that fn.Name() has one less path separator
|
||||
// than our desired output. We count separators from the end of the file
|
||||
// path until it finds two more than in the function name and then move
|
||||
// one character forward to preserve the initial path segment without a
|
||||
// leading separator.
|
||||
const sep = "/"
|
||||
goal := strings.Count(name, sep) + 2
|
||||
i := len(file)
|
||||
for n := 0; n < goal; n++ {
|
||||
i = strings.LastIndex(file[:i], sep)
|
||||
if i == -1 {
|
||||
// not enough separators found, set i so that the slice expression
|
||||
// below leaves file unmodified
|
||||
i = -len(sep)
|
||||
break
|
||||
}
|
||||
}
|
||||
// get back to 0 or trim the leading separator
|
||||
file = file[i+len(sep):]
|
||||
return file
|
||||
}
27
vendor/github.com/templexxx/cpufeat/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32
vendor/github.com/templexxx/cpufeat/cpu.go
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cpufeat implements processor feature detection
// used by the Go standard library.
|
||||
package cpufeat
|
||||
|
||||
var X86 x86
|
||||
|
||||
// The booleans in x86 contain the correspondingly named cpuid feature bit.
|
||||
// HasAVX and HasAVX2 are only set if the OS does support XMM and YMM registers
|
||||
// in addition to the cpuid feature bit being set.
|
||||
// The struct is padded to avoid false sharing.
|
||||
type x86 struct {
|
||||
_ [CacheLineSize]byte
|
||||
HasAES bool
|
||||
HasAVX bool
|
||||
HasAVX2 bool
|
||||
HasBMI1 bool
|
||||
HasBMI2 bool
|
||||
HasERMS bool
|
||||
HasOSXSAVE bool
|
||||
HasPCLMULQDQ bool
|
||||
HasPOPCNT bool
|
||||
HasSSE2 bool
|
||||
HasSSE3 bool
|
||||
HasSSSE3 bool
|
||||
HasSSE41 bool
|
||||
HasSSE42 bool
|
||||
_ [CacheLineSize]byte
|
||||
}
|
||||
7
vendor/github.com/templexxx/cpufeat/cpu_arm.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpufeat
|
||||
|
||||
const CacheLineSize = 32
|
||||
7
vendor/github.com/templexxx/cpufeat/cpu_arm64.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpufeat
|
||||
|
||||
const CacheLineSize = 32
|
||||
7
vendor/github.com/templexxx/cpufeat/cpu_mips.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpufeat
|
||||
|
||||
const CacheLineSize = 32
|
||||
7
vendor/github.com/templexxx/cpufeat/cpu_mips64.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpufeat
|
||||
|
||||
const CacheLineSize = 32
|
||||
7
vendor/github.com/templexxx/cpufeat/cpu_mips64le.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpufeat
|
||||
|
||||
const CacheLineSize = 32
|
||||
7
vendor/github.com/templexxx/cpufeat/cpu_mipsle.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpufeat
|
||||
|
||||
const CacheLineSize = 32
|
||||
7
vendor/github.com/templexxx/cpufeat/cpu_ppc64.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpufeat
|
||||
|
||||
const CacheLineSize = 128
|
||||
7
vendor/github.com/templexxx/cpufeat/cpu_ppc64le.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpufeat
|
||||
|
||||
const CacheLineSize = 128
|
||||
7
vendor/github.com/templexxx/cpufeat/cpu_s390x.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpufeat
|
||||
|
||||
const CacheLineSize = 256
|
||||
59
vendor/github.com/templexxx/cpufeat/cpu_x86.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build 386 amd64 amd64p32
|
||||
|
||||
package cpufeat
|
||||
|
||||
const CacheLineSize = 64
|
||||
|
||||
// cpuid is implemented in cpu_x86.s.
|
||||
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
|
||||
|
||||
// xgetbv with ecx = 0 is implemented in cpu_x86.s.
|
||||
func xgetbv() (eax, edx uint32)
|
||||
|
||||
func init() {
|
||||
maxId, _, _, _ := cpuid(0, 0)
|
||||
|
||||
if maxId < 1 {
|
||||
return
|
||||
}
|
||||
|
||||
_, _, ecx1, edx1 := cpuid(1, 0)
|
||||
X86.HasSSE2 = isSet(26, edx1)
|
||||
|
||||
X86.HasSSE3 = isSet(0, ecx1)
|
||||
X86.HasPCLMULQDQ = isSet(1, ecx1)
|
||||
X86.HasSSSE3 = isSet(9, ecx1)
|
||||
X86.HasSSE41 = isSet(19, ecx1)
|
||||
X86.HasSSE42 = isSet(20, ecx1)
|
||||
X86.HasPOPCNT = isSet(23, ecx1)
|
||||
X86.HasAES = isSet(25, ecx1)
|
||||
X86.HasOSXSAVE = isSet(27, ecx1)
|
||||
|
||||
osSupportsAVX := false
|
||||
// For XGETBV, OSXSAVE bit is required and sufficient.
|
||||
if X86.HasOSXSAVE {
|
||||
eax, _ := xgetbv()
|
||||
// Check if XMM and YMM registers have OS support.
|
||||
osSupportsAVX = isSet(1, eax) && isSet(2, eax)
|
||||
}
|
||||
|
||||
X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
|
||||
|
||||
if maxId < 7 {
|
||||
return
|
||||
}
|
||||
|
||||
_, ebx7, _, _ := cpuid(7, 0)
|
||||
X86.HasBMI1 = isSet(3, ebx7)
|
||||
X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
|
||||
X86.HasBMI2 = isSet(8, ebx7)
|
||||
X86.HasERMS = isSet(9, ebx7)
|
||||
}
|
||||
|
||||
func isSet(bitpos uint, value uint32) bool {
|
||||
return value&(1<<bitpos) != 0
|
||||
}
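// Illustrative sketch, not part of the original cpufeat source: a caller can
// rank its code paths by the flags filled in by init above. The kernel names
// here are placeholders, not anything cpufeat itself provides.
func exampleSelectKernel() string {
	switch {
	case X86.HasAVX2:
		return "avx2"
	case X86.HasSSSE3:
		return "ssse3"
	default:
		return "generic"
	}
}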
32
vendor/github.com/templexxx/cpufeat/cpu_x86.s
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build 386 amd64 amd64p32
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·cpuid(SB), NOSPLIT, $0-24
|
||||
MOVL eaxArg+0(FP), AX
|
||||
MOVL ecxArg+4(FP), CX
|
||||
CPUID
|
||||
MOVL AX, eax+8(FP)
|
||||
MOVL BX, ebx+12(FP)
|
||||
MOVL CX, ecx+16(FP)
|
||||
MOVL DX, edx+20(FP)
|
||||
RET
|
||||
|
||||
// func xgetbv() (eax, edx uint32)
|
||||
TEXT ·xgetbv(SB),NOSPLIT,$0-8
|
||||
#ifdef GOOS_nacl
|
||||
// nacl does not support XGETBV.
|
||||
MOVL $0, eax+0(FP)
|
||||
MOVL $0, edx+4(FP)
|
||||
#else
|
||||
MOVL $0, CX
|
||||
WORD $0x010f; BYTE $0xd0 //XGETBV
|
||||
MOVL AX, eax+0(FP)
|
||||
MOVL DX, edx+4(FP)
|
||||
#endif
|
||||
RET
|
||||
23
vendor/github.com/templexxx/reedsolomon/LICENSE
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2017 Templexxx
|
||||
Copyright (c) 2015 Klaus Post
|
||||
Copyright (c) 2015 Backblaze
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
109
vendor/github.com/templexxx/reedsolomon/README.md
generated
vendored
Normal file
@ -0,0 +1,109 @@
|
||||
# Reed-Solomon
|
||||
|
||||
[![GoDoc][1]][2] [![MIT licensed][3]][4] [![Build Status][5]][6] [![Go Report Card][7]][8]
|
||||
|
||||
[1]: https://godoc.org/github.com/templexxx/reedsolomon?status.svg
|
||||
[2]: https://godoc.org/github.com/templexxx/reedsolomon
|
||||
[3]: https://img.shields.io/badge/license-MIT-blue.svg
|
||||
[4]: LICENSE
|
||||
[5]: https://travis-ci.org/templexxx/reedsolomon.svg?branch=master
|
||||
[6]: https://travis-ci.org/templexxx/reedsolomon
|
||||
[7]: https://goreportcard.com/badge/github.com/templexxx/reedsolomon
|
||||
[8]: https://goreportcard.com/report/github.com/templexxx/reedsolomon
|
||||
|
||||
|
||||
## Introduction:
|
||||
1. Reed-Solomon Erasure Code engine in pure Go.
|
||||
2. Super fast: more than 10GB/s per physical core (10+4, 4KB per vector, MacBook Pro 2.8 GHz Intel Core i7)
|
||||
|
||||
## Installation
|
||||
To get the package use the standard:
|
||||
```bash
|
||||
go get github.com/templexxx/reedsolomon
|
||||
```
|
||||
|
||||
## Documentation
|
||||
See the associated [GoDoc](http://godoc.org/github.com/templexxx/reedsolomon)
|
||||
|
||||
## Specification
|
||||
### GOARCH
|
||||
1. All architectures are supported
2. 0.1.0 needs Go 1.9 for sync.Map on AMD64
|
||||
|
||||
### Math
|
||||
1. Coding is done over GF(2^8) (see the multiplication sketch after this list)
2. Primitive polynomial: x^8 + x^4 + x^3 + x^2 + 1 (0x1d)
3. mathtool/gentbls.go : generates the primitive polynomial and its log table, exp table, multiply table, inverse table, etc.; it gives more insight into how the Galois field works
4. mathtool/cntinverse.go : calculates how many inverse matrices exist for different RS code configurations
5. Both Cauchy and Vandermonde matrices are supported. Vandermonde needs extra operations to preserve the property that any square subset of rows is invertible
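For intuition, a product in this field can be computed bit by bit against the primitive polynomial above; the table-driven `gfMul` used in matrix.go returns the same results via the generated log/exp tables (and SIMD):

```go
// gfMulBitwise multiplies two elements of GF(2^8) defined by
// x^8 + x^4 + x^3 + x^2 + 1, reducing with XOR 0x1d on every overflow.
func gfMulBitwise(a, b byte) byte {
	var p byte
	for b > 0 {
		if b&1 == 1 {
			p ^= a // "add" (XOR in GF(2^8)) the current multiple of a
		}
		carry := a & 0x80
		a <<= 1
		if carry != 0 {
			a ^= 0x1d // reduce modulo the primitive polynomial
		}
		b >>= 1
	}
	return p
}
```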
|
||||
|
||||
### Why so fast?
|
||||
These three parts cost the most time:

1. lookup of the galois-field tables
2. read/write memory
3. calculating the inverse matrix in the reconstruct process

SIMD solves no.1.

Cache-friendly code helps with no.2 & no.3. In addition, a sync.Map caches inverse matrices, which saves about 1000ns whenever the same matrix is needed again.
|
||||
|
||||
## Performance
|
||||
|
||||
Performance depends mainly on:
|
||||
|
||||
1. CPU instruction extensions (AVX2, SSSE3, or none)
2. number of data/parity vects
3. unit size of calculation (see rs_amd64.go)
4. size of shards
5. speed of memory (much time is spent on reading/writing memory)
6. performance of CPU
7. the way it is used (reuse memory)
|
||||
|
||||
And we must know the benchmark test is quite different with encoding/decoding in practice.
|
||||
|
||||
Because in benchmark test loops, the CPU Cache will help a lot. In practice, we must reuse the memory to make the performance become as good as the benchmark test.
|
||||
|
||||
Example of performance on my MacBook 2017 i7 2.8GHz. 10+4 (with 0.1.0).
|
||||
|
||||
### Encoding:

| Vector size | Speed (MB/S) |
|----------------|--------------|
| 1400B | 7655.02 |
| 4KB | 10551.37 |
| 64KB | 9297.25 |
| 1MB | 6829.89 |
| 16MB | 6312.83 |

### Reconstruct (set a lost vector to nil to mark it for repair):

| Vector size | Speed (MB/S) |
|----------------|--------------|
| 1400B | 4124.85 |
| 4KB | 5715.45 |
| 64KB | 6050.06 |
| 1MB | 5001.21 |
| 16MB | 5043.04 |

### ReconstructWithPos (use a position list to mark what needs repair, reusing memory):

| Vector size | Speed (MB/S) |
|----------------|--------------|
| 1400B | 6170.24 |
| 4KB | 9444.86 |
| 64KB | 9311.30 |
| 1MB | 6781.06 |
| 16MB | 6285.34 |

**The reconstruct benchmarks above run with the inverse-matrix cache; without it they cost roughly 1000ns more.**
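For the position-based variant benchmarked above, `ReconstWithPos` on the `Encoder` interface (see `rs.go` below) takes explicit survivor and lost-position lists. A sketch for a 3+2 code that lost data vector 0, with sizes chosen only for illustration:

```go
package main

import "github.com/templexxx/reedsolomon"

func main() {
	// 3 data + 2 parity vectors; the whole position set is [0,1,2,3,4].
	enc, err := reedsolomon.New(3, 2)
	if err != nil {
		panic(err)
	}
	vects := make([][]byte, 5)
	for i := range vects {
		vects[i] = make([]byte, 1024) // equal-sized vectors (size illustrative)
	}
	vects[0][0], vects[1][0], vects[2][0] = 'a', 'b', 'c'
	if err := enc.Encode(vects); err != nil {
		panic(err)
	}

	// Data vector 0 is considered lost; its buffer stays allocated and will
	// simply be overwritten with the repaired content.
	for i := range vects[0] {
		vects[0][i] = 0
	}
	has := []int{1, 2, 3} // d surviving positions, in increasing order
	dLost := []int{0}     // lost data positions
	var pLost []int       // no parity lost
	if err := enc.ReconstWithPos(vects, has, dLost, pLost); err != nil {
		panic(err)
	}
}
```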
## Who is using this?

1. https://github.com/xtaci/kcp-go -- A Production-Grade Reliable-UDP Library for golang

## Links & Thanks

* [Klauspost ReedSolomon](https://github.com/klauspost/reedsolomon)
* [intel ISA-L](https://github.com/01org/isa-l)
* [GF SIMD](http://www.ssrc.ucsc.edu/papers/plank-fast13.pdf)
* [asm2plan9s](https://github.com/fwessels/asm2plan9s)
156
vendor/github.com/templexxx/reedsolomon/matrix.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
|
||||
package reedsolomon
|
||||
|
||||
import "errors"
|
||||
|
||||
type matrix []byte
|
||||
|
||||
func genEncMatrixCauchy(d, p int) matrix {
|
||||
t := d + p
|
||||
m := make([]byte, t*d)
|
||||
for i := 0; i < d; i++ {
|
||||
m[i*d+i] = byte(1)
|
||||
}
|
||||
|
||||
d2 := d * d
|
||||
for i := d; i < t; i++ {
|
||||
for j := 0; j < d; j++ {
|
||||
d := i ^ j
|
||||
a := inverseTbl[d]
|
||||
m[d2] = byte(a)
|
||||
d2++
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func gfExp(b byte, n int) byte {
|
||||
if n == 0 {
|
||||
return 1
|
||||
}
|
||||
if b == 0 {
|
||||
return 0
|
||||
}
|
||||
a := logTbl[b]
|
||||
ret := int(a) * n
|
||||
for ret >= 255 {
|
||||
ret -= 255
|
||||
}
|
||||
return byte(expTbl[ret])
|
||||
}
|
||||
|
||||
func genVandMatrix(vm []byte, t, d int) {
|
||||
for i := 0; i < t; i++ {
|
||||
for j := 0; j < d; j++ {
|
||||
vm[i*d+j] = gfExp(byte(i), j)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m matrix) mul(right matrix, rows, cols int, r []byte) {
|
||||
for i := 0; i < rows; i++ {
|
||||
for j := 0; j < cols; j++ {
|
||||
var v byte
|
||||
for k := 0; k < cols; k++ {
|
||||
v ^= gfMul(m[i*cols+k], right[k*cols+j])
|
||||
}
|
||||
r[i*cols+j] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genEncMatrixVand(d, p int) (matrix, error) {
|
||||
t := d + p
|
||||
buf := make([]byte, (2*t+4*d)*d)
|
||||
vm := buf[:t*d]
|
||||
genVandMatrix(vm, t, d)
|
||||
top := buf[t*d : (t+d)*d]
|
||||
copy(top, vm[:d*d])
|
||||
raw := buf[(t+d)*d : (t+3*d)*d]
|
||||
im := buf[(t+3*d)*d : (t+4*d)*d]
|
||||
err := matrix(top).invert(raw, d, im)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r := buf[(t+4*d)*d : (2*t+4*d)*d]
|
||||
matrix(vm).mul(im, t, d, r)
|
||||
return matrix(r), nil
|
||||
}
|
||||
|
||||
// [I|m'] -> [m']
|
||||
func (m matrix) subMatrix(n int, r []byte) {
|
||||
for i := 0; i < n; i++ {
|
||||
off := i * n
|
||||
copy(r[off:off+n], m[2*off+n:2*(off+n)])
|
||||
}
|
||||
}
|
||||
|
||||
func (m matrix) invert(raw matrix, n int, im []byte) error {
|
||||
// [m] -> [m|I]
|
||||
for i := 0; i < n; i++ {
|
||||
t := i * n
|
||||
copy(raw[2*t:2*t+n], m[t:t+n])
|
||||
raw[2*t+i+n] = byte(1)
|
||||
}
|
||||
err := gauss(raw, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
raw.subMatrix(n, im)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m matrix) swap(i, j, n int) {
|
||||
for k := 0; k < n; k++ {
|
||||
m[i*n+k], m[j*n+k] = m[j*n+k], m[i*n+k]
|
||||
}
|
||||
}
|
||||
|
||||
func gfMul(a, b byte) byte {
|
||||
return mulTbl[a][b]
|
||||
}
|
||||
|
||||
var errSingular = errors.New("rs.invert: matrix is singular")
|
||||
|
||||
// [m|I] -> [I|m']
|
||||
func gauss(m matrix, n int) error {
|
||||
n2 := 2 * n
|
||||
for i := 0; i < n; i++ {
|
||||
if m[i*n2+i] == 0 {
|
||||
for j := i + 1; j < n; j++ {
|
||||
if m[j*n2+i] != 0 {
|
||||
m.swap(i, j, n2)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if m[i*n2+i] == 0 {
|
||||
return errSingular
|
||||
}
|
||||
if m[i*n2+i] != 1 {
|
||||
d := m[i*n2+i]
|
||||
scale := inverseTbl[d]
|
||||
for c := 0; c < n2; c++ {
|
||||
m[i*n2+c] = gfMul(m[i*n2+c], scale)
|
||||
}
|
||||
}
|
||||
for j := i + 1; j < n; j++ {
|
||||
if m[j*n2+i] != 0 {
|
||||
scale := m[j*n2+i]
|
||||
for c := 0; c < n2; c++ {
|
||||
m[j*n2+c] ^= gfMul(scale, m[i*n2+c])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for k := 0; k < n; k++ {
|
||||
for j := 0; j < k; j++ {
|
||||
if m[j*n2+k] != 0 {
|
||||
scale := m[j*n2+k]
|
||||
for c := 0; c < n2; c++ {
|
||||
m[j*n2+c] ^= gfMul(scale, m[k*n2+c])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
280
vendor/github.com/templexxx/reedsolomon/rs.go
generated
vendored
Normal file
@ -0,0 +1,280 @@
|
||||
/*
|
||||
Reed-Solomon Codes over GF(2^8)
|
||||
Primitive Polynomial: x^8+x^4+x^3+x^2+1
|
||||
Galois Field arithmetic using Intel SIMD instructions (AVX2 or SSSE3)
|
||||
*/
|
||||
|
||||
package reedsolomon
|
||||
|
||||
import "errors"
|
||||
|
||||
// Encoder implements Reed-Solomon Encoding/Reconstructing
|
||||
type Encoder interface {
|
||||
// Encode multiply generator-matrix with data
|
||||
// len(vects) must be equal with num of data+parity
|
||||
Encode(vects [][]byte) error
|
||||
// Result of reconst will be put into origin position of vects
|
||||
// it means if you lost vects[0], after reconst the vects[0]'s data will be back in vects[0]
|
||||
|
||||
// Reconstruct repair lost data & parity
|
||||
// Set vect nil if lost
|
||||
Reconstruct(vects [][]byte) error
|
||||
// Reconstruct repair lost data
|
||||
// Set vect nil if lost
|
||||
ReconstructData(vects [][]byte) error
|
||||
// ReconstWithPos repair lost data&parity with has&lost vects position
|
||||
// Save bandwidth&disk I/O (cmp with Reconstruct, if the lost is less than num of parity)
|
||||
// As erasure codes, we must know which vect is broken,
|
||||
// so it's necessary to provide such APIs
|
||||
// len(has) must equal num of data vects
|
||||
// Example:
|
||||
// in 3+2, the whole position: [0,1,2,3,4]
|
||||
// if lost vects[0]
|
||||
// the "has" could be [1,2,3] or [1,2,4] or ...
|
||||
// then you must be sure that vects[1] vects[2] vects[3] have correct data (if the "has" is [1,2,3])
|
||||
// the "dLost" will be [0]
|
||||
// ps:
|
||||
// 1. the above lists are in increasing orders TODO support out-of-order
|
||||
// 2. each vect has same len, don't set it nil
|
||||
// so we don't need to make slice
|
||||
ReconstWithPos(vects [][]byte, has, dLost, pLost []int) error
|
||||
//// ReconstWithPos repair lost data with survived&lost vects position
|
||||
//// Don't need to append position of parity lost into "lost"
|
||||
ReconstDataWithPos(vects [][]byte, has, dLost []int) error
|
||||
}
|
||||
|
||||
func checkCfg(d, p int) error {
|
||||
if (d <= 0) || (p <= 0) {
|
||||
return errors.New("rs.New: data or parity <= 0")
|
||||
}
|
||||
if d+p >= 256 {
|
||||
return errors.New("rs.New: data+parity >= 256")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// New create an Encoder (vandermonde matrix as Encoding matrix)
|
||||
func New(data, parity int) (enc Encoder, err error) {
|
||||
err = checkCfg(data, parity)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
e, err := genEncMatrixVand(data, parity)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return newRS(data, parity, e), nil
|
||||
}
|
||||
|
||||
// NewCauchy create an Encoder (cauchy matrix as Generator Matrix)
|
||||
func NewCauchy(data, parity int) (enc Encoder, err error) {
|
||||
err = checkCfg(data, parity)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
e := genEncMatrixCauchy(data, parity)
|
||||
return newRS(data, parity, e), nil
|
||||
}
|
||||
|
||||
type encBase struct {
|
||||
data int
|
||||
parity int
|
||||
encode []byte
|
||||
gen []byte
|
||||
}
|
||||
|
||||
func checkEnc(d, p int, vs [][]byte) (size int, err error) {
|
||||
total := len(vs)
|
||||
if d+p != total {
|
||||
err = errors.New("rs.checkER: vects not match rs args")
|
||||
return
|
||||
}
|
||||
size = len(vs[0])
|
||||
if size == 0 {
|
||||
err = errors.New("rs.checkER: vects size = 0")
|
||||
return
|
||||
}
|
||||
for i := 1; i < total; i++ {
|
||||
if len(vs[i]) != size {
|
||||
err = errors.New("rs.checkER: vects size mismatch")
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (e *encBase) Encode(vects [][]byte) (err error) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
_, err = checkEnc(d, p, vects)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dv := vects[:d]
|
||||
pv := vects[d:]
|
||||
g := e.gen
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
if i != 0 {
|
||||
mulVectAdd(g[j*d+i], dv[i], pv[j])
|
||||
} else {
|
||||
mulVect(g[j*d], dv[0], pv[j])
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func mulVect(c byte, a, b []byte) {
|
||||
t := mulTbl[c]
|
||||
for i := 0; i < len(a); i++ {
|
||||
b[i] = t[a[i]]
|
||||
}
|
||||
}
|
||||
|
||||
func mulVectAdd(c byte, a, b []byte) {
|
||||
t := mulTbl[c]
|
||||
for i := 0; i < len(a); i++ {
|
||||
b[i] ^= t[a[i]]
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encBase) Reconstruct(vects [][]byte) (err error) {
|
||||
return e.reconstruct(vects, false)
|
||||
}
|
||||
|
||||
func (e *encBase) ReconstructData(vects [][]byte) (err error) {
|
||||
return e.reconstruct(vects, true)
|
||||
}
|
||||
|
||||
func (e *encBase) ReconstWithPos(vects [][]byte, has, dLost, pLost []int) error {
|
||||
return e.reconstWithPos(vects, has, dLost, pLost, false)
|
||||
}
|
||||
|
||||
func (e *encBase) ReconstDataWithPos(vects [][]byte, has, dLost []int) error {
|
||||
return e.reconstWithPos(vects, has, dLost, nil, true)
|
||||
}
|
||||
|
||||
func (e *encBase) reconst(vects [][]byte, has, dLost, pLost []int, dataOnly bool) (err error) {
|
||||
d := e.data
|
||||
em := e.encode
|
||||
dCnt := len(dLost)
|
||||
size := len(vects[has[0]])
|
||||
if dCnt != 0 {
|
||||
vtmp := make([][]byte, d+dCnt)
|
||||
for i, p := range has {
|
||||
vtmp[i] = vects[p]
|
||||
}
|
||||
for i, p := range dLost {
|
||||
if len(vects[p]) == 0 {
|
||||
vects[p] = make([]byte, size)
|
||||
}
|
||||
vtmp[i+d] = vects[p]
|
||||
}
|
||||
matrixbuf := make([]byte, 4*d*d+dCnt*d)
|
||||
m := matrixbuf[:d*d]
|
||||
for i, l := range has {
|
||||
copy(m[i*d:i*d+d], em[l*d:l*d+d])
|
||||
}
|
||||
raw := matrixbuf[d*d : 3*d*d]
|
||||
im := matrixbuf[3*d*d : 4*d*d]
|
||||
err2 := matrix(m).invert(raw, d, im)
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
g := matrixbuf[4*d*d:]
|
||||
for i, l := range dLost {
|
||||
copy(g[i*d:i*d+d], im[l*d:l*d+d])
|
||||
}
|
||||
etmp := &encBase{data: d, parity: dCnt, gen: g}
|
||||
err2 = etmp.Encode(vtmp[:d+dCnt])
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
}
|
||||
if dataOnly {
|
||||
return
|
||||
}
|
||||
pCnt := len(pLost)
|
||||
if pCnt != 0 {
|
||||
vtmp := make([][]byte, d+pCnt)
|
||||
g := make([]byte, pCnt*d)
|
||||
for i, l := range pLost {
|
||||
copy(g[i*d:i*d+d], em[l*d:l*d+d])
|
||||
}
|
||||
for i := 0; i < d; i++ {
|
||||
vtmp[i] = vects[i]
|
||||
}
|
||||
for i, p := range pLost {
|
||||
if len(vects[p]) == 0 {
|
||||
vects[p] = make([]byte, size)
|
||||
}
|
||||
vtmp[i+d] = vects[p]
|
||||
}
|
||||
etmp := &encBase{data: d, parity: pCnt, gen: g}
|
||||
err2 := etmp.Encode(vtmp[:d+pCnt])
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (e *encBase) reconstWithPos(vects [][]byte, has, dLost, pLost []int, dataOnly bool) (err error) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
// TODO check more, maybe element in has show in lost & deal with len(has) > d
|
||||
if len(has) != d {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
dCnt := len(dLost)
|
||||
if dCnt > p {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
pCnt := len(pLost)
|
||||
if pCnt > p {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
return e.reconst(vects, has, dLost, pLost, dataOnly)
|
||||
}
|
||||
|
||||
func (e *encBase) reconstruct(vects [][]byte, dataOnly bool) (err error) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
t := d + p
|
||||
listBuf := make([]int, t+p)
|
||||
has := listBuf[:d]
|
||||
dLost := listBuf[d:t]
|
||||
pLost := listBuf[t : t+p]
|
||||
hasCnt, dCnt, pCnt := 0, 0, 0
|
||||
for i := 0; i < t; i++ {
|
||||
if vects[i] != nil {
|
||||
if hasCnt < d {
|
||||
has[hasCnt] = i
|
||||
hasCnt++
|
||||
}
|
||||
} else {
|
||||
if i < d {
|
||||
if dCnt < p {
|
||||
dLost[dCnt] = i
|
||||
dCnt++
|
||||
} else {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
} else {
|
||||
if pCnt < p {
|
||||
pLost[pCnt] = i
|
||||
pCnt++
|
||||
} else {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if hasCnt != d {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
dLost = dLost[:dCnt]
|
||||
pLost = pLost[:pCnt]
|
||||
return e.reconst(vects, has, dLost, pLost, dataOnly)
|
||||
}
|
||||
868
vendor/github.com/templexxx/reedsolomon/rs_amd64.go
generated
vendored
Normal file
@ -0,0 +1,868 @@
|
||||
package reedsolomon
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/templexxx/cpufeat"
|
||||
)
|
||||
|
||||
// SIMD Instruction Extensions
|
||||
const (
|
||||
none = iota
|
||||
avx2
|
||||
ssse3
|
||||
)
|
||||
|
||||
var extension = none
|
||||
|
||||
func init() {
|
||||
getEXT()
|
||||
}
|
||||
|
||||
func getEXT() {
|
||||
if cpufeat.X86.HasAVX2 {
|
||||
extension = avx2
|
||||
return
|
||||
} else if cpufeat.X86.HasSSSE3 {
|
||||
extension = ssse3
|
||||
return
|
||||
} else {
|
||||
extension = none
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
//go:noescape
|
||||
func copy32B(dst, src []byte) // Need SSE2(introduced in 2001)
|
||||
|
||||
func initTbl(g matrix, rows, cols int, tbl []byte) {
|
||||
off := 0
|
||||
for i := 0; i < cols; i++ {
|
||||
for j := 0; j < rows; j++ {
|
||||
c := g[j*cols+i]
|
||||
t := lowhighTbl[c][:]
|
||||
copy32B(tbl[off:off+32], t)
|
||||
off += 32
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// At most 3060 inverse matrix (when data=14, parity=4, calc by mathtool/cntinverse)
|
||||
// In practice, data usually below 12, parity below 5
|
||||
func okCache(data, parity int) bool {
|
||||
if data < 15 && parity < 5 { // you can change it, but the data+parity can't be bigger than 32 (tips: see the codes about make inverse matrix)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
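// Worked check for the bound above: with data=14 and parity=4 there are
// C(14+4, 14) = C(18, 4) = 3060 ways to choose which 14 of the 18 vectors
// survive, so at most 3060 distinct inverse matrices can ever be cached.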
|
||||
type (
|
||||
encSSSE3 encSIMD
|
||||
encAVX2 encSIMD
|
||||
encSIMD struct {
|
||||
data int
|
||||
parity int
|
||||
encode matrix
|
||||
gen matrix
|
||||
tbl []byte
|
||||
// inverse matrix cache is designed for small vect size ( < 4KB )
|
||||
// it will save time for calculating inverse matrix
|
||||
// but it's not so important for big vect size
|
||||
enableCache bool
|
||||
inverseCache iCache
|
||||
}
|
||||
iCache struct {
|
||||
sync.RWMutex
|
||||
data map[uint32][]byte
|
||||
}
|
||||
)
|
||||
|
||||
func newRS(d, p int, em matrix) (enc Encoder) {
|
||||
g := em[d*d:]
|
||||
if extension == none {
|
||||
return &encBase{data: d, parity: p, encode: em, gen: g}
|
||||
}
|
||||
t := make([]byte, d*p*32)
|
||||
initTbl(g, p, d, t)
|
||||
ok := okCache(d, p)
|
||||
if extension == avx2 {
|
||||
e := &encAVX2{data: d, parity: p, encode: em, gen: g, tbl: t, enableCache: ok,
|
||||
inverseCache: iCache{data: make(map[uint32][]byte)}}
|
||||
return e
|
||||
}
|
||||
e := &encSSSE3{data: d, parity: p, encode: em, gen: g, tbl: t, enableCache: ok,
|
||||
inverseCache: iCache{data: make(map[uint32][]byte)}}
|
||||
return e
|
||||
}
|
||||
|
||||
// Size of sub-vector
|
||||
const unit int = 16 * 1024
|
||||
|
||||
func getDo(n int) int {
|
||||
if n < unit {
|
||||
c := n >> 4
|
||||
if c == 0 {
|
||||
return unit
|
||||
}
|
||||
return c << 4
|
||||
}
|
||||
return unit
|
||||
}
|
||||
|
||||
func (e *encAVX2) Encode(vects [][]byte) (err error) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
size, err := checkEnc(d, p, vects)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dv := vects[:d]
|
||||
pv := vects[d:]
|
||||
start, end := 0, 0
|
||||
do := getDo(size)
|
||||
for start < size {
|
||||
end = start + do
|
||||
if end <= size {
|
||||
e.matrixMul(start, end, dv, pv)
|
||||
start = end
|
||||
} else {
|
||||
e.matrixMulRemain(start, size, dv, pv)
|
||||
start = size
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//go:noescape
|
||||
func mulVectAVX2(tbl, d, p []byte)
|
||||
|
||||
//go:noescape
|
||||
func mulVectAddAVX2(tbl, d, p []byte)
|
||||
|
||||
func (e *encAVX2) matrixMul(start, end int, dv, pv [][]byte) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
tbl := e.tbl
|
||||
off := 0
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := tbl[off : off+32]
|
||||
if i != 0 {
|
||||
mulVectAddAVX2(t, dv[i][start:end], pv[j][start:end])
|
||||
} else {
|
||||
mulVectAVX2(t, dv[0][start:end], pv[j][start:end])
|
||||
}
|
||||
off += 32
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encAVX2) matrixMulRemain(start, end int, dv, pv [][]byte) {
|
||||
undone := end - start
|
||||
do := (undone >> 4) << 4
|
||||
d := e.data
|
||||
p := e.parity
|
||||
tbl := e.tbl
|
||||
if do >= 16 {
|
||||
end2 := start + do
|
||||
off := 0
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := tbl[off : off+32]
|
||||
if i != 0 {
|
||||
mulVectAddAVX2(t, dv[i][start:end2], pv[j][start:end2])
|
||||
} else {
|
||||
mulVectAVX2(t, dv[0][start:end2], pv[j][start:end2])
|
||||
}
|
||||
off += 32
|
||||
}
|
||||
}
|
||||
start = end
|
||||
}
|
||||
if undone > do {
|
||||
// may recalculate some data, but still improve a lot
|
||||
start2 := end - 16
|
||||
if start2 >= 0 {
|
||||
off := 0
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := tbl[off : off+32]
|
||||
if i != 0 {
|
||||
mulVectAddAVX2(t, dv[i][start2:end], pv[j][start2:end])
|
||||
} else {
|
||||
mulVectAVX2(t, dv[0][start2:end], pv[j][start2:end])
|
||||
}
|
||||
off += 32
|
||||
}
|
||||
}
|
||||
} else {
|
||||
g := e.gen
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
if i != 0 {
|
||||
mulVectAdd(g[j*d+i], dv[i][start:], pv[j][start:])
|
||||
} else {
|
||||
mulVect(g[j*d], dv[0][start:], pv[j][start:])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// use generator-matrix but not tbls for encoding
|
||||
// it's designed for reconstructing
|
||||
// for small vects, it costs too much time on initTbl, so drop it
|
||||
// and for big vects, the tbls can't impact much, because the cache will be filled with vects' data
|
||||
func (e *encAVX2) encodeGen(vects [][]byte) (err error) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
size, err := checkEnc(d, p, vects)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dv := vects[:d]
|
||||
pv := vects[d:]
|
||||
start, end := 0, 0
|
||||
do := getDo(size)
|
||||
for start < size {
|
||||
end = start + do
|
||||
if end <= size {
|
||||
e.matrixMulGen(start, end, dv, pv)
|
||||
start = end
|
||||
} else {
|
||||
e.matrixMulRemainGen(start, size, dv, pv)
|
||||
start = size
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (e *encAVX2) matrixMulGen(start, end int, dv, pv [][]byte) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
g := e.gen
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := lowhighTbl[g[j*d+i]][:]
|
||||
if i != 0 {
|
||||
mulVectAddAVX2(t, dv[i][start:end], pv[j][start:end])
|
||||
} else {
|
||||
mulVectAVX2(t, dv[0][start:end], pv[j][start:end])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encAVX2) matrixMulRemainGen(start, end int, dv, pv [][]byte) {
|
||||
undone := end - start
|
||||
do := (undone >> 4) << 4
|
||||
d := e.data
|
||||
p := e.parity
|
||||
g := e.gen
|
||||
if do >= 16 {
|
||||
end2 := start + do
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := lowhighTbl[g[j*d+i]][:]
|
||||
if i != 0 {
|
||||
mulVectAddAVX2(t, dv[i][start:end2], pv[j][start:end2])
|
||||
} else {
|
||||
mulVectAVX2(t, dv[0][start:end2], pv[j][start:end2])
|
||||
}
|
||||
}
|
||||
}
|
||||
start = end
|
||||
}
|
||||
if undone > do {
|
||||
start2 := end - 16
|
||||
if start2 >= 0 {
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := lowhighTbl[g[j*d+i]][:]
|
||||
if i != 0 {
|
||||
mulVectAddAVX2(t, dv[i][start2:end], pv[j][start2:end])
|
||||
} else {
|
||||
mulVectAVX2(t, dv[0][start2:end], pv[j][start2:end])
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
if i != 0 {
|
||||
mulVectAdd(g[j*d+i], dv[i][start:], pv[j][start:])
|
||||
} else {
|
||||
mulVect(g[j*d], dv[0][start:], pv[j][start:])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encAVX2) Reconstruct(vects [][]byte) (err error) {
|
||||
return e.reconstruct(vects, false)
|
||||
}
|
||||
|
||||
func (e *encAVX2) ReconstructData(vects [][]byte) (err error) {
|
||||
return e.reconstruct(vects, true)
|
||||
}
|
||||
|
||||
func (e *encAVX2) ReconstWithPos(vects [][]byte, has, dLost, pLost []int) error {
|
||||
return e.reconstWithPos(vects, has, dLost, pLost, false)
|
||||
}
|
||||
|
||||
func (e *encAVX2) ReconstDataWithPos(vects [][]byte, has, dLost []int) error {
|
||||
return e.reconstWithPos(vects, has, dLost, nil, true)
|
||||
}
|
||||
|
||||
func (e *encAVX2) makeGen(has, dLost []int) (gen []byte, err error) {
|
||||
d := e.data
|
||||
em := e.encode
|
||||
cnt := len(dLost)
|
||||
if !e.enableCache {
|
||||
matrixbuf := make([]byte, 4*d*d+cnt*d)
|
||||
m := matrixbuf[:d*d]
|
||||
for i, l := range has {
|
||||
copy(m[i*d:i*d+d], em[l*d:l*d+d])
|
||||
}
|
||||
raw := matrixbuf[d*d : 3*d*d]
|
||||
im := matrixbuf[3*d*d : 4*d*d]
|
||||
err2 := matrix(m).invert(raw, d, im)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
g := matrixbuf[4*d*d:]
|
||||
for i, l := range dLost {
|
||||
copy(g[i*d:i*d+d], im[l*d:l*d+d])
|
||||
}
|
||||
return g, nil
|
||||
}
|
||||
var ikey uint32
|
||||
for _, p := range has {
|
||||
ikey += 1 << uint8(p)
|
||||
}
|
||||
e.inverseCache.RLock()
|
||||
v, ok := e.inverseCache.data[ikey]
|
||||
if ok {
|
||||
im := v
|
||||
g := make([]byte, cnt*d)
|
||||
for i, l := range dLost {
|
||||
copy(g[i*d:i*d+d], im[l*d:l*d+d])
|
||||
}
|
||||
e.inverseCache.RUnlock()
|
||||
return g, nil
|
||||
}
|
||||
e.inverseCache.RUnlock()
|
||||
matrixbuf := make([]byte, 4*d*d+cnt*d)
|
||||
m := matrixbuf[:d*d]
|
||||
for i, l := range has {
|
||||
copy(m[i*d:i*d+d], em[l*d:l*d+d])
|
||||
}
|
||||
raw := matrixbuf[d*d : 3*d*d]
|
||||
im := matrixbuf[3*d*d : 4*d*d]
|
||||
err2 := matrix(m).invert(raw, d, im)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
e.inverseCache.Lock()
|
||||
e.inverseCache.data[ikey] = im
|
||||
e.inverseCache.Unlock()
|
||||
g := matrixbuf[4*d*d:]
|
||||
for i, l := range dLost {
|
||||
copy(g[i*d:i*d+d], im[l*d:l*d+d])
|
||||
}
|
||||
return g, nil
|
||||
}
|
||||
|
||||
func (e *encAVX2) reconst(vects [][]byte, has, dLost, pLost []int, dataOnly bool) (err error) {
|
||||
d := e.data
|
||||
em := e.encode
|
||||
dCnt := len(dLost)
|
||||
size := len(vects[has[0]])
|
||||
if dCnt != 0 {
|
||||
vtmp := make([][]byte, d+dCnt)
|
||||
for i, p := range has {
|
||||
vtmp[i] = vects[p]
|
||||
}
|
||||
for i, p := range dLost {
|
||||
if len(vects[p]) == 0 {
|
||||
vects[p] = make([]byte, size)
|
||||
}
|
||||
vtmp[i+d] = vects[p]
|
||||
}
|
||||
g, err2 := e.makeGen(has, dLost)
|
||||
if err2 != nil {
|
||||
return
|
||||
}
|
||||
etmp := &encAVX2{data: d, parity: dCnt, gen: g}
|
||||
err2 = etmp.encodeGen(vtmp)
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
}
|
||||
if dataOnly {
|
||||
return
|
||||
}
|
||||
pCnt := len(pLost)
|
||||
if pCnt != 0 {
|
||||
g := make([]byte, pCnt*d)
|
||||
for i, l := range pLost {
|
||||
copy(g[i*d:i*d+d], em[l*d:l*d+d])
|
||||
}
|
||||
vtmp := make([][]byte, d+pCnt)
|
||||
for i := 0; i < d; i++ {
|
||||
vtmp[i] = vects[i]
|
||||
}
|
||||
for i, p := range pLost {
|
||||
if len(vects[p]) == 0 {
|
||||
vects[p] = make([]byte, size)
|
||||
}
|
||||
vtmp[i+d] = vects[p]
|
||||
}
|
||||
etmp := &encAVX2{data: d, parity: pCnt, gen: g}
|
||||
err2 := etmp.encodeGen(vtmp)
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (e *encAVX2) reconstWithPos(vects [][]byte, has, dLost, pLost []int, dataOnly bool) (err error) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
if len(has) != d {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
dCnt := len(dLost)
|
||||
if dCnt > p {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
pCnt := len(pLost)
|
||||
if pCnt > p {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
return e.reconst(vects, has, dLost, pLost, dataOnly)
|
||||
}
|
||||
|
||||
func (e *encAVX2) reconstruct(vects [][]byte, dataOnly bool) (err error) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
t := d + p
|
||||
listBuf := make([]int, t+p)
|
||||
has := listBuf[:d]
|
||||
dLost := listBuf[d:t]
|
||||
pLost := listBuf[t : t+p]
|
||||
hasCnt, dCnt, pCnt := 0, 0, 0
|
||||
for i := 0; i < t; i++ {
|
||||
if vects[i] != nil {
|
||||
if hasCnt < d {
|
||||
has[hasCnt] = i
|
||||
hasCnt++
|
||||
}
|
||||
} else {
|
||||
if i < d {
|
||||
if dCnt < p {
|
||||
dLost[dCnt] = i
|
||||
dCnt++
|
||||
} else {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
} else {
|
||||
if pCnt < p {
|
||||
pLost[pCnt] = i
|
||||
pCnt++
|
||||
} else {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if hasCnt != d {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
dLost = dLost[:dCnt]
|
||||
pLost = pLost[:pCnt]
|
||||
return e.reconst(vects, has, dLost, pLost, dataOnly)
|
||||
}
|
||||
|
||||
func (e *encSSSE3) Encode(vects [][]byte) (err error) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
size, err := checkEnc(d, p, vects)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dv := vects[:d]
|
||||
pv := vects[d:]
|
||||
start, end := 0, 0
|
||||
do := getDo(size)
|
||||
for start < size {
|
||||
end = start + do
|
||||
if end <= size {
|
||||
e.matrixMul(start, end, dv, pv)
|
||||
start = end
|
||||
} else {
|
||||
e.matrixMulRemain(start, size, dv, pv)
|
||||
start = size
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//go:noescape
|
||||
func mulVectSSSE3(tbl, d, p []byte)
|
||||
|
||||
//go:noescape
|
||||
func mulVectAddSSSE3(tbl, d, p []byte)
|
||||
|
||||
func (e *encSSSE3) matrixMul(start, end int, dv, pv [][]byte) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
tbl := e.tbl
|
||||
off := 0
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := tbl[off : off+32]
|
||||
if i != 0 {
|
||||
mulVectAddSSSE3(t, dv[i][start:end], pv[j][start:end])
|
||||
} else {
|
||||
mulVectSSSE3(t, dv[0][start:end], pv[j][start:end])
|
||||
}
|
||||
off += 32
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encSSSE3) matrixMulRemain(start, end int, dv, pv [][]byte) {
|
||||
undone := end - start
|
||||
do := (undone >> 4) << 4
|
||||
d := e.data
|
||||
p := e.parity
|
||||
tbl := e.tbl
|
||||
if do >= 16 {
|
||||
end2 := start + do
|
||||
off := 0
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := tbl[off : off+32]
|
||||
if i != 0 {
|
||||
mulVectAddSSSE3(t, dv[i][start:end2], pv[j][start:end2])
|
||||
} else {
|
||||
mulVectSSSE3(t, dv[0][start:end2], pv[j][start:end2])
|
||||
}
|
||||
off += 32
|
||||
}
|
||||
}
|
||||
start = end
|
||||
}
|
||||
if undone > do {
|
||||
start2 := end - 16
|
||||
if start2 >= 0 {
|
||||
off := 0
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := tbl[off : off+32]
|
||||
if i != 0 {
|
||||
mulVectAddSSSE3(t, dv[i][start2:end], pv[j][start2:end])
|
||||
} else {
|
||||
mulVectSSSE3(t, dv[0][start2:end], pv[j][start2:end])
|
||||
}
|
||||
off += 32
|
||||
}
|
||||
}
|
||||
} else {
|
||||
g := e.gen
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
if i != 0 {
|
||||
mulVectAdd(g[j*d+i], dv[i][start:], pv[j][start:])
|
||||
} else {
|
||||
mulVect(g[j*d], dv[0][start:], pv[j][start:])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// use generator-matrix but not tbls for encoding
|
||||
// it's designed for reconstructing
|
||||
// for small vects, it costs too much time on initTbl, so drop it
|
||||
// and for big vects, the tbls can't impact much, because the cache will be filled with vects' data
|
||||
func (e *encSSSE3) encodeGen(vects [][]byte) (err error) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
size, err := checkEnc(d, p, vects)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dv := vects[:d]
|
||||
pv := vects[d:]
|
||||
start, end := 0, 0
|
||||
do := getDo(size)
|
||||
for start < size {
|
||||
end = start + do
|
||||
if end <= size {
|
||||
e.matrixMulGen(start, end, dv, pv)
|
||||
start = end
|
||||
} else {
|
||||
e.matrixMulRemainGen(start, size, dv, pv)
|
||||
start = size
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (e *encSSSE3) matrixMulGen(start, end int, dv, pv [][]byte) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
g := e.gen
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := lowhighTbl[g[j*d+i]][:]
|
||||
if i != 0 {
|
||||
mulVectAddSSSE3(t, dv[i][start:end], pv[j][start:end])
|
||||
} else {
|
||||
mulVectSSSE3(t, dv[0][start:end], pv[j][start:end])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encSSSE3) matrixMulRemainGen(start, end int, dv, pv [][]byte) {
|
||||
undone := end - start
|
||||
do := (undone >> 4) << 4
|
||||
d := e.data
|
||||
p := e.parity
|
||||
g := e.gen
|
||||
if do >= 16 {
|
||||
end2 := start + do
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := lowhighTbl[g[j*d+i]][:]
|
||||
if i != 0 {
|
||||
mulVectAddSSSE3(t, dv[i][start:end2], pv[j][start:end2])
|
||||
} else {
|
||||
mulVectSSSE3(t, dv[0][start:end2], pv[j][start:end2])
|
||||
}
|
||||
}
|
||||
}
|
||||
start = end
|
||||
}
|
||||
if undone > do {
|
||||
start2 := end - 16
|
||||
if start2 >= 0 {
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
t := lowhighTbl[g[j*d+i]][:]
|
||||
if i != 0 {
|
||||
mulVectAddSSSE3(t, dv[i][start2:end], pv[j][start2:end])
|
||||
} else {
|
||||
mulVectSSSE3(t, dv[0][start2:end], pv[j][start2:end])
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < d; i++ {
|
||||
for j := 0; j < p; j++ {
|
||||
if i != 0 {
|
||||
mulVectAdd(g[j*d+i], dv[i][start:], pv[j][start:])
|
||||
} else {
|
||||
mulVect(g[j*d], dv[0][start:], pv[j][start:])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encSSSE3) Reconstruct(vects [][]byte) (err error) {
|
||||
return e.reconstruct(vects, false)
|
||||
}
|
||||
|
||||
func (e *encSSSE3) ReconstructData(vects [][]byte) (err error) {
|
||||
return e.reconstruct(vects, true)
|
||||
}
|
||||
|
||||
func (e *encSSSE3) ReconstWithPos(vects [][]byte, has, dLost, pLost []int) error {
|
||||
return e.reconstWithPos(vects, has, dLost, pLost, false)
|
||||
}
|
||||
|
||||
func (e *encSSSE3) ReconstDataWithPos(vects [][]byte, has, dLost []int) error {
|
||||
return e.reconstWithPos(vects, has, dLost, nil, true)
|
||||
}
|
||||
|
||||
func (e *encSSSE3) makeGen(has, dLost []int) (gen []byte, err error) {
|
||||
d := e.data
|
||||
em := e.encode
|
||||
cnt := len(dLost)
|
||||
if !e.enableCache {
|
||||
matrixbuf := make([]byte, 4*d*d+cnt*d)
|
||||
m := matrixbuf[:d*d]
|
||||
for i, l := range has {
|
||||
copy(m[i*d:i*d+d], em[l*d:l*d+d])
|
||||
}
|
||||
raw := matrixbuf[d*d : 3*d*d]
|
||||
im := matrixbuf[3*d*d : 4*d*d]
|
||||
err2 := matrix(m).invert(raw, d, im)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
g := matrixbuf[4*d*d:]
|
||||
for i, l := range dLost {
|
||||
copy(g[i*d:i*d+d], im[l*d:l*d+d])
|
||||
}
|
||||
return g, nil
|
||||
}
|
||||
var ikey uint32
|
||||
for _, p := range has {
|
||||
ikey += 1 << uint8(p)
|
||||
}
|
||||
e.inverseCache.RLock()
|
||||
v, ok := e.inverseCache.data[ikey]
|
||||
if ok {
|
||||
im := v
|
||||
g := make([]byte, cnt*d)
|
||||
for i, l := range dLost {
|
||||
copy(g[i*d:i*d+d], im[l*d:l*d+d])
|
||||
}
|
||||
e.inverseCache.RUnlock()
|
||||
return g, nil
|
||||
}
|
||||
e.inverseCache.RUnlock()
|
||||
matrixbuf := make([]byte, 4*d*d+cnt*d)
|
||||
m := matrixbuf[:d*d]
|
||||
for i, l := range has {
|
||||
copy(m[i*d:i*d+d], em[l*d:l*d+d])
|
||||
}
|
||||
raw := matrixbuf[d*d : 3*d*d]
|
||||
im := matrixbuf[3*d*d : 4*d*d]
|
||||
err2 := matrix(m).invert(raw, d, im)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
e.inverseCache.Lock()
|
||||
e.inverseCache.data[ikey] = im
|
||||
e.inverseCache.Unlock()
|
||||
g := matrixbuf[4*d*d:]
|
||||
for i, l := range dLost {
|
||||
copy(g[i*d:i*d+d], im[l*d:l*d+d])
|
||||
}
|
||||
return g, nil
|
||||
}
|
||||
|
||||
func (e *encSSSE3) reconst(vects [][]byte, has, dLost, pLost []int, dataOnly bool) (err error) {
|
||||
d := e.data
|
||||
em := e.encode
|
||||
dCnt := len(dLost)
|
||||
size := len(vects[has[0]])
|
||||
if dCnt != 0 {
|
||||
vtmp := make([][]byte, d+dCnt)
|
||||
for i, p := range has {
|
||||
vtmp[i] = vects[p]
|
||||
}
|
||||
for i, p := range dLost {
|
||||
if len(vects[p]) == 0 {
|
||||
vects[p] = make([]byte, size)
|
||||
}
|
||||
vtmp[i+d] = vects[p]
|
||||
}
|
||||
g, err2 := e.makeGen(has, dLost)
|
||||
if err2 != nil {
|
||||
return
|
||||
}
|
||||
etmp := &encSSSE3{data: d, parity: dCnt, gen: g}
|
||||
err2 = etmp.encodeGen(vtmp)
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
}
|
||||
if dataOnly {
|
||||
return
|
||||
}
|
||||
pCnt := len(pLost)
|
||||
if pCnt != 0 {
|
||||
g := make([]byte, pCnt*d)
|
||||
for i, l := range pLost {
|
||||
copy(g[i*d:i*d+d], em[l*d:l*d+d])
|
||||
}
|
||||
vtmp := make([][]byte, d+pCnt)
|
||||
for i := 0; i < d; i++ {
|
||||
vtmp[i] = vects[i]
|
||||
}
|
||||
for i, p := range pLost {
|
||||
if len(vects[p]) == 0 {
|
||||
vects[p] = make([]byte, size)
|
||||
}
|
||||
vtmp[i+d] = vects[p]
|
||||
}
|
||||
etmp := &encSSSE3{data: d, parity: pCnt, gen: g}
|
||||
err2 := etmp.encodeGen(vtmp)
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (e *encSSSE3) reconstWithPos(vects [][]byte, has, dLost, pLost []int, dataOnly bool) (err error) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
if len(has) != d {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
dCnt := len(dLost)
|
||||
if dCnt > p {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
pCnt := len(pLost)
|
||||
if pCnt > p {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
return e.reconst(vects, has, dLost, pLost, dataOnly)
|
||||
}
|
||||
|
||||
func (e *encSSSE3) reconstruct(vects [][]byte, dataOnly bool) (err error) {
|
||||
d := e.data
|
||||
p := e.parity
|
||||
t := d + p
|
||||
listBuf := make([]int, t+p)
|
||||
has := listBuf[:d]
|
||||
dLost := listBuf[d:t]
|
||||
pLost := listBuf[t : t+p]
|
||||
hasCnt, dCnt, pCnt := 0, 0, 0
|
||||
for i := 0; i < t; i++ {
|
||||
if vects[i] != nil {
|
||||
if hasCnt < d {
|
||||
has[hasCnt] = i
|
||||
hasCnt++
|
||||
}
|
||||
} else {
|
||||
if i < d {
|
||||
if dCnt < p {
|
||||
dLost[dCnt] = i
|
||||
dCnt++
|
||||
} else {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
} else {
|
||||
if pCnt < p {
|
||||
pLost[pCnt] = i
|
||||
pCnt++
|
||||
} else {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if hasCnt != d {
|
||||
return errors.New("rs.Reconst: not enough vects")
|
||||
}
|
||||
dLost = dLost[:dCnt]
|
||||
pLost = pLost[:pCnt]
|
||||
return e.reconst(vects, has, dLost, pLost, dataOnly)
|
||||
}
|
||||
401
vendor/github.com/templexxx/reedsolomon/rs_amd64.s
generated
vendored
Normal file
@ -0,0 +1,401 @@
|
||||
// Reference: www.ssrc.ucsc.edu/Papers/plank-fast13.pdf
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
#define low_tbl Y0
|
||||
#define high_tbl Y1
|
||||
#define mask Y2
|
||||
#define in0 Y3
|
||||
#define in1 Y4
|
||||
#define in2 Y5
|
||||
#define in3 Y6
|
||||
#define in4 Y7
|
||||
#define in5 Y8
|
||||
#define in0_h Y10
|
||||
#define in1_h Y11
|
||||
#define in2_h Y12
|
||||
#define in3_h Y13
|
||||
#define in4_h Y14
|
||||
#define in5_h Y15
|
||||
|
||||
#define in BX
|
||||
#define out DI
|
||||
#define len R8
|
||||
#define pos R9
|
||||
|
||||
#define tmp0 R10
|
||||
|
||||
#define low_tblx X0
|
||||
#define high_tblx X1
|
||||
#define maskx X2
|
||||
#define in0x X3
|
||||
#define in0_hx X10
|
||||
#define tmp0x X9
|
||||
#define tmp1x X11
|
||||
#define tmp2x X12
|
||||
#define tmp3x X13
|
||||
|
||||
|
||||
// func mulVectAVX2(tbl, d, p []byte)
|
||||
TEXT ·mulVectAVX2(SB), NOSPLIT, $0
|
||||
MOVQ i+24(FP), in
|
||||
MOVQ o+48(FP), out
|
||||
MOVQ tbl+0(FP), tmp0
|
||||
VMOVDQU (tmp0), low_tblx
|
||||
VMOVDQU 16(tmp0), high_tblx
|
||||
MOVB $0x0f, DX
|
||||
LONG $0x2069e3c4; WORD $0x00d2 // VPINSRB $0x00, EDX, XMM2, XMM2
|
||||
VPBROADCASTB maskx, maskx
|
||||
MOVQ in_len+32(FP), len
|
||||
TESTQ $31, len
|
||||
JNZ one16b
|
||||
|
||||
ymm:
|
||||
VINSERTI128 $1, low_tblx, low_tbl, low_tbl
|
||||
VINSERTI128 $1, high_tblx, high_tbl, high_tbl
|
||||
VINSERTI128 $1, maskx, mask, mask
|
||||
TESTQ $255, len
|
||||
JNZ not_aligned
|
||||
|
||||
// 256bytes/loop
|
||||
aligned:
|
||||
MOVQ $0, pos
|
||||
|
||||
loop256b:
|
||||
VMOVDQU (in)(pos*1), in0
|
||||
VPSRLQ $4, in0, in0_h
|
||||
VPAND mask, in0_h, in0_h
|
||||
VPAND mask, in0, in0
|
||||
VPSHUFB in0_h, high_tbl, in0_h
|
||||
VPSHUFB in0, low_tbl, in0
|
||||
VPXOR in0, in0_h, in0
|
||||
VMOVDQU in0, (out)(pos*1)
|
||||
|
||||
VMOVDQU 32(in)(pos*1), in1
|
||||
VPSRLQ $4, in1, in1_h
|
||||
VPAND mask, in1_h, in1_h
|
||||
VPAND mask, in1, in1
|
||||
VPSHUFB in1_h, high_tbl, in1_h
|
||||
VPSHUFB in1, low_tbl, in1
|
||||
VPXOR in1, in1_h, in1
|
||||
VMOVDQU in1, 32(out)(pos*1)
|
||||
|
||||
VMOVDQU 64(in)(pos*1), in2
|
||||
VPSRLQ $4, in2, in2_h
|
||||
VPAND mask, in2_h, in2_h
|
||||
VPAND mask, in2, in2
|
||||
VPSHUFB in2_h, high_tbl, in2_h
|
||||
VPSHUFB in2, low_tbl, in2
|
||||
VPXOR in2, in2_h, in2
|
||||
VMOVDQU in2, 64(out)(pos*1)
|
||||
|
||||
VMOVDQU 96(in)(pos*1), in3
|
||||
VPSRLQ $4, in3, in3_h
|
||||
VPAND mask, in3_h, in3_h
|
||||
VPAND mask, in3, in3
|
||||
VPSHUFB in3_h, high_tbl, in3_h
|
||||
VPSHUFB in3, low_tbl, in3
|
||||
VPXOR in3, in3_h, in3
|
||||
VMOVDQU in3, 96(out)(pos*1)
|
||||
|
||||
VMOVDQU 128(in)(pos*1), in4
|
||||
VPSRLQ $4, in4, in4_h
|
||||
VPAND mask, in4_h, in4_h
|
||||
VPAND mask, in4, in4
|
||||
VPSHUFB in4_h, high_tbl, in4_h
|
||||
VPSHUFB in4, low_tbl, in4
|
||||
VPXOR in4, in4_h, in4
|
||||
VMOVDQU in4, 128(out)(pos*1)
|
||||
|
||||
VMOVDQU 160(in)(pos*1), in5
|
||||
VPSRLQ $4, in5, in5_h
|
||||
VPAND mask, in5_h, in5_h
|
||||
VPAND mask, in5, in5
|
||||
VPSHUFB in5_h, high_tbl, in5_h
|
||||
VPSHUFB in5, low_tbl, in5
|
||||
VPXOR in5, in5_h, in5
|
||||
VMOVDQU in5, 160(out)(pos*1)
|
||||
|
||||
VMOVDQU 192(in)(pos*1), in0
|
||||
VPSRLQ $4, in0, in0_h
|
||||
VPAND mask, in0_h, in0_h
|
||||
VPAND mask, in0, in0
|
||||
VPSHUFB in0_h, high_tbl, in0_h
|
||||
VPSHUFB in0, low_tbl, in0
|
||||
VPXOR in0, in0_h, in0
|
||||
VMOVDQU in0, 192(out)(pos*1)
|
||||
|
||||
VMOVDQU 224(in)(pos*1), in1
|
||||
VPSRLQ $4, in1, in1_h
|
||||
VPAND mask, in1_h, in1_h
|
||||
VPAND mask, in1, in1
|
||||
VPSHUFB in1_h, high_tbl, in1_h
|
||||
VPSHUFB in1, low_tbl, in1
|
||||
VPXOR in1, in1_h, in1
|
||||
VMOVDQU in1, 224(out)(pos*1)
|
||||
|
||||
ADDQ $256, pos
|
||||
CMPQ len, pos
|
||||
JNE loop256b
|
||||
VZEROUPPER
|
||||
RET
|
||||
|
||||
not_aligned:
|
||||
MOVQ len, tmp0
|
||||
ANDQ $255, tmp0
|
||||
|
||||
loop32b:
|
||||
VMOVDQU -32(in)(len*1), in0
|
||||
VPSRLQ $4, in0, in0_h
|
||||
VPAND mask, in0_h, in0_h
|
||||
VPAND mask, in0, in0
|
||||
VPSHUFB in0_h, high_tbl, in0_h
|
||||
VPSHUFB in0, low_tbl, in0
|
||||
VPXOR in0, in0_h, in0
|
||||
VMOVDQU in0, -32(out)(len*1)
|
||||
SUBQ $32, len
|
||||
SUBQ $32, tmp0
|
||||
JG loop32b
|
||||
CMPQ len, $256
|
||||
JGE aligned
|
||||
VZEROUPPER
|
||||
RET
|
||||
|
||||
one16b:
|
||||
VMOVDQU -16(in)(len*1), in0x
|
||||
VPSRLQ $4, in0x, in0_hx
|
||||
VPAND maskx, in0x, in0x
|
||||
VPAND maskx, in0_hx, in0_hx
|
||||
VPSHUFB in0_hx, high_tblx, in0_hx
|
||||
VPSHUFB in0x, low_tblx, in0x
|
||||
VPXOR in0x, in0_hx, in0x
|
||||
VMOVDQU in0x, -16(out)(len*1)
|
||||
SUBQ $16, len
|
||||
CMPQ len, $0
|
||||
JNE ymm
|
||||
RET
|
||||
|
||||
// func mulVectAddAVX2(tbl, d, p []byte)
|
||||
TEXT ·mulVectAddAVX2(SB), NOSPLIT, $0
|
||||
MOVQ i+24(FP), in
|
||||
MOVQ o+48(FP), out
|
||||
MOVQ tbl+0(FP), tmp0
|
||||
VMOVDQU (tmp0), low_tblx
|
||||
VMOVDQU 16(tmp0), high_tblx
|
||||
MOVB $0x0f, DX
|
||||
LONG $0x2069e3c4; WORD $0x00d2
|
||||
VPBROADCASTB maskx, maskx
|
||||
MOVQ in_len+32(FP), len
|
||||
TESTQ $31, len
|
||||
JNZ one16b
|
||||
|
||||
ymm:
|
||||
VINSERTI128 $1, low_tblx, low_tbl, low_tbl
|
||||
VINSERTI128 $1, high_tblx, high_tbl, high_tbl
|
||||
VINSERTI128 $1, maskx, mask, mask
|
||||
TESTQ $255, len
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, pos
|
||||
|
||||
loop256b:
|
||||
VMOVDQU (in)(pos*1), in0
|
||||
VPSRLQ $4, in0, in0_h
|
||||
VPAND mask, in0_h, in0_h
|
||||
VPAND mask, in0, in0
|
||||
VPSHUFB in0_h, high_tbl, in0_h
|
||||
VPSHUFB in0, low_tbl, in0
|
||||
VPXOR in0, in0_h, in0
|
||||
VPXOR (out)(pos*1), in0, in0
|
||||
VMOVDQU in0, (out)(pos*1)
|
||||
|
||||
VMOVDQU 32(in)(pos*1), in1
|
||||
VPSRLQ $4, in1, in1_h
|
||||
VPAND mask, in1_h, in1_h
|
||||
VPAND mask, in1, in1
|
||||
VPSHUFB in1_h, high_tbl, in1_h
|
||||
VPSHUFB in1, low_tbl, in1
|
||||
VPXOR in1, in1_h, in1
|
||||
VPXOR 32(out)(pos*1), in1, in1
|
||||
VMOVDQU in1, 32(out)(pos*1)
|
||||
|
||||
VMOVDQU 64(in)(pos*1), in2
|
||||
VPSRLQ $4, in2, in2_h
|
||||
VPAND mask, in2_h, in2_h
|
||||
VPAND mask, in2, in2
|
||||
VPSHUFB in2_h, high_tbl, in2_h
|
||||
VPSHUFB in2, low_tbl, in2
|
||||
VPXOR in2, in2_h, in2
|
||||
VPXOR 64(out)(pos*1), in2, in2
|
||||
VMOVDQU in2, 64(out)(pos*1)
|
||||
|
||||
VMOVDQU 96(in)(pos*1), in3
|
||||
VPSRLQ $4, in3, in3_h
|
||||
VPAND mask, in3_h, in3_h
|
||||
VPAND mask, in3, in3
|
||||
VPSHUFB in3_h, high_tbl, in3_h
|
||||
VPSHUFB in3, low_tbl, in3
|
||||
VPXOR in3, in3_h, in3
|
||||
VPXOR 96(out)(pos*1), in3, in3
|
||||
VMOVDQU in3, 96(out)(pos*1)
|
||||
|
||||
VMOVDQU 128(in)(pos*1), in4
|
||||
VPSRLQ $4, in4, in4_h
|
||||
VPAND mask, in4_h, in4_h
|
||||
VPAND mask, in4, in4
|
||||
VPSHUFB in4_h, high_tbl, in4_h
|
||||
VPSHUFB in4, low_tbl, in4
|
||||
VPXOR in4, in4_h, in4
|
||||
VPXOR 128(out)(pos*1), in4, in4
|
||||
VMOVDQU in4, 128(out)(pos*1)
|
||||
|
||||
VMOVDQU 160(in)(pos*1), in5
|
||||
VPSRLQ $4, in5, in5_h
|
||||
VPAND mask, in5_h, in5_h
|
||||
VPAND mask, in5, in5
|
||||
VPSHUFB in5_h, high_tbl, in5_h
|
||||
VPSHUFB in5, low_tbl, in5
|
||||
VPXOR in5, in5_h, in5
|
||||
VPXOR 160(out)(pos*1), in5, in5
|
||||
VMOVDQU in5, 160(out)(pos*1)
|
||||
|
||||
VMOVDQU 192(in)(pos*1), in0
|
||||
VPSRLQ $4, in0, in0_h
|
||||
VPAND mask, in0_h, in0_h
|
||||
VPAND mask, in0, in0
|
||||
VPSHUFB in0_h, high_tbl, in0_h
|
||||
VPSHUFB in0, low_tbl, in0
|
||||
VPXOR in0, in0_h, in0
|
||||
VPXOR 192(out)(pos*1), in0, in0
|
||||
VMOVDQU in0, 192(out)(pos*1)
|
||||
|
||||
VMOVDQU 224(in)(pos*1), in1
|
||||
VPSRLQ $4, in1, in1_h
|
||||
VPAND mask, in1_h, in1_h
|
||||
VPAND mask, in1, in1
|
||||
VPSHUFB in1_h, high_tbl, in1_h
|
||||
VPSHUFB in1, low_tbl, in1
|
||||
VPXOR in1, in1_h, in1
|
||||
VPXOR 224(out)(pos*1), in1, in1
|
||||
VMOVDQU in1, 224(out)(pos*1)
|
||||
|
||||
ADDQ $256, pos
|
||||
CMPQ len, pos
|
||||
JNE loop256b
|
||||
VZEROUPPER
|
||||
RET
|
||||
|
||||
not_aligned:
|
||||
MOVQ len, tmp0
|
||||
ANDQ $255, tmp0
|
||||
|
||||
loop32b:
|
||||
VMOVDQU -32(in)(len*1), in0
|
||||
VPSRLQ $4, in0, in0_h
|
||||
VPAND mask, in0_h, in0_h
|
||||
VPAND mask, in0, in0
|
||||
VPSHUFB in0_h, high_tbl, in0_h
|
||||
VPSHUFB in0, low_tbl, in0
|
||||
VPXOR in0, in0_h, in0
|
||||
VPXOR -32(out)(len*1), in0, in0
|
||||
VMOVDQU in0, -32(out)(len*1)
|
||||
SUBQ $32, len
|
||||
SUBQ $32, tmp0
|
||||
JG loop32b
|
||||
CMPQ len, $256
|
||||
JGE aligned
|
||||
VZEROUPPER
|
||||
RET
|
||||
|
||||
one16b:
|
||||
VMOVDQU -16(in)(len*1), in0x
|
||||
VPSRLQ $4, in0x, in0_hx
|
||||
VPAND maskx, in0x, in0x
|
||||
VPAND maskx, in0_hx, in0_hx
|
||||
VPSHUFB in0_hx, high_tblx, in0_hx
|
||||
VPSHUFB in0x, low_tblx, in0x
|
||||
VPXOR in0x, in0_hx, in0x
|
||||
VPXOR -16(out)(len*1), in0x, in0x
|
||||
VMOVDQU in0x, -16(out)(len*1)
|
||||
SUBQ $16, len
|
||||
CMPQ len, $0
|
||||
JNE ymm
|
||||
RET
|
||||
|
||||
// func mulVectSSSE3(tbl, d, p []byte)
|
||||
TEXT ·mulVectSSSE3(SB), NOSPLIT, $0
|
||||
MOVQ i+24(FP), in
|
||||
MOVQ o+48(FP), out
|
||||
MOVQ tbl+0(FP), tmp0
|
||||
MOVOU (tmp0), low_tblx
|
||||
MOVOU 16(tmp0), high_tblx
|
||||
MOVB $15, tmp0
|
||||
MOVQ tmp0, maskx
|
||||
PXOR tmp0x, tmp0x
|
||||
PSHUFB tmp0x, maskx
|
||||
MOVQ in_len+32(FP), len
|
||||
SHRQ $4, len
|
||||
|
||||
loop:
|
||||
MOVOU (in), in0x
|
||||
MOVOU in0x, in0_hx
|
||||
PSRLQ $4, in0_hx
|
||||
PAND maskx, in0x
|
||||
PAND maskx, in0_hx
|
||||
MOVOU low_tblx, tmp1x
|
||||
MOVOU high_tblx, tmp2x
|
||||
PSHUFB in0x, tmp1x
|
||||
PSHUFB in0_hx, tmp2x
|
||||
PXOR tmp1x, tmp2x
|
||||
MOVOU tmp2x, (out)
|
||||
ADDQ $16, in
|
||||
ADDQ $16, out
|
||||
SUBQ $1, len
|
||||
JNZ loop
|
||||
RET
|
||||
|
||||
// func mulVectAddSSSE3(tbl, d, p []byte)
|
||||
TEXT ·mulVectAddSSSE3(SB), NOSPLIT, $0
|
||||
MOVQ i+24(FP), in
|
||||
MOVQ o+48(FP), out
|
||||
MOVQ tbl+0(FP), tmp0
|
||||
MOVOU (tmp0), low_tblx
|
||||
MOVOU 16(tmp0), high_tblx
|
||||
MOVB $15, tmp0
|
||||
MOVQ tmp0, maskx
|
||||
PXOR tmp0x, tmp0x
|
||||
PSHUFB tmp0x, maskx
|
||||
MOVQ in_len+32(FP), len
|
||||
SHRQ $4, len
|
||||
|
||||
loop:
|
||||
MOVOU (in), in0x
|
||||
MOVOU in0x, in0_hx
|
||||
PSRLQ $4, in0_hx
|
||||
PAND maskx, in0x
|
||||
PAND maskx, in0_hx
|
||||
MOVOU low_tblx, tmp1x
|
||||
MOVOU high_tblx, tmp2x
|
||||
PSHUFB in0x, tmp1x
|
||||
PSHUFB in0_hx, tmp2x
|
||||
PXOR tmp1x, tmp2x
|
||||
MOVOU (out), tmp3x
|
||||
PXOR tmp3x, tmp2x
|
||||
MOVOU tmp2x, (out)
|
||||
ADDQ $16, in
|
||||
ADDQ $16, out
|
||||
SUBQ $1, len
|
||||
JNZ loop
|
||||
RET
|
||||
|
||||
// func copy32B(dst, src []byte)
|
||||
TEXT ·copy32B(SB), NOSPLIT, $0
|
||||
MOVQ dst+0(FP), SI
|
||||
MOVQ src+24(FP), DX
|
||||
MOVOU (DX), X0
|
||||
MOVOU 16(DX), X1
|
||||
MOVOU X0, (SI)
|
||||
MOVOU X1, 16(SI)
|
||||
RET
|
||||
|
||||
8
vendor/github.com/templexxx/reedsolomon/rs_other.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
// +build !amd64
|
||||
|
||||
package reedsolomon
|
||||
|
||||
func newRS(d, p int, em matrix) (enc Encoder) {
|
||||
g := em[d*d:]
|
||||
return &encBase{data: d, parity: p, encode: em, gen: g}
|
||||
}
|
||||
44
vendor/github.com/templexxx/reedsolomon/tbl.go
generated
vendored
Normal file
21
vendor/github.com/templexxx/xor/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
MIT License

Copyright (c) 2017 Temple3x

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
48
vendor/github.com/templexxx/xor/README.md
generated
vendored
Normal file
@ -0,0 +1,48 @@
# XOR

XOR code engine in pure Go

more than 10 GB/s per core

## Introduction:

1. Uses SIMD (SSE2 or AVX2) for speed-up (a plain-Go sketch of the underlying operation follows below)
2. ...
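The exported API is not shown in this diff, so as a rough illustration of the operation the SIMD kernels below accelerate, here is a plain-Go sketch; the function name and signature are illustrative, not this package's API:

```go
// xorBytes is a plain-Go reference for what the SIMD kernels accelerate:
// dst[i] = src0[i] ^ src1[i], clamped to the shortest slice.
func xorBytes(dst, src0, src1 []byte) {
	n := len(dst)
	if len(src0) < n {
		n = len(src0)
	}
	if len(src1) < n {
		n = len(src1)
	}
	for i := 0; i < n; i++ {
		dst[i] = src0[i] ^ src1[i]
	}
}
```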
## Installation

To get the package, use the standard:

```bash
go get github.com/templexxx/xor
```

## Documentation

See the associated [GoDoc](http://godoc.org/github.com/templexxx/xor)
## Performance

Performance depends mainly on:

1. SIMD extension
2. unit size of worker
3. hardware (CPU, RAM etc.)

Example of performance on my MacBook 2014-mid (i5-4278U 2.6GHz, 2 physical cores); shard sizes are listed in the table below.

```
speed = ( shards * size ) / cost
```

| data_shards | shard_size | speed (MB/S) |
|-------------|------------|--------------|
| 2 | 1KB | 64127.95 |
| 2 | 1400B | 59657.55 |
| 2 | 16KB | 35370.84 |
| 2 | 16MB | 12128.95 |
| 5 | 1KB | 78837.33 |
| 5 | 1400B | 58054.89 |
| 5 | 16KB | 50161.19 |
| 5 | 16MB | 12750.41 |

## Who is using this?

1. https://github.com/xtaci/kcp-go -- A Production-Grade Reliable-UDP Library for golang
438
vendor/github.com/templexxx/xor/avx2_amd64.s
generated
vendored
Normal file
@ -0,0 +1,438 @@
|
||||
#include "textflag.h"
|
||||
|
||||
// addr of mem
|
||||
#define DST BX
|
||||
#define SRC SI
|
||||
#define SRC0 TMP4
|
||||
#define SRC1 TMP5
|
||||
|
||||
// loop args
|
||||
// num of vect
|
||||
#define VECT CX
|
||||
#define LEN DX
|
||||
// pos of matrix
|
||||
#define POS R8
|
||||
|
||||
// tmp store
|
||||
// num of vect or ...
|
||||
#define TMP1 R9
|
||||
// pos of matrix or ...
|
||||
#define TMP2 R10
|
||||
// store addr of data/parity or ...
|
||||
#define TMP3 R11
|
||||
#define TMP4 R12
|
||||
#define TMP5 R13
|
||||
#define TMP6 R14
|
||||
|
||||
// func bytesAVX2mini(dst, src0, src1 []byte, size int)
|
||||
TEXT ·bytesAVX2mini(SB), NOSPLIT, $0
|
||||
MOVQ len+72(FP), LEN
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src0+24(FP), SRC0
|
||||
MOVQ src1+48(FP), SRC1
|
||||
TESTQ $31, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop32b:
|
||||
VMOVDQU (SRC0)(POS*1), Y0
|
||||
VPXOR (SRC1)(POS*1), Y0, Y0
|
||||
VMOVDQU Y0, (DST)(POS*1)
|
||||
ADDQ $32, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop32b
|
||||
VZEROUPPER
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVB -1(SRC0)(LEN*1), TMP1
|
||||
MOVB -1(SRC1)(LEN*1), TMP2
|
||||
XORB TMP1, TMP2
|
||||
MOVB TMP2, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $31, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP1
|
||||
ANDQ $31, TMP1
|
||||
|
||||
loop_8b:
|
||||
MOVQ -8(SRC0)(LEN*1), TMP2
|
||||
MOVQ -8(SRC1)(LEN*1), TMP3
|
||||
XORQ TMP2, TMP3
|
||||
MOVQ TMP3, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP1
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $32
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
// func bytesAVX2small(dst, src0, src1 []byte, size int)
|
||||
TEXT ·bytesAVX2small(SB), NOSPLIT, $0
|
||||
MOVQ len+72(FP), LEN
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src0+24(FP), SRC0
|
||||
MOVQ src1+48(FP), SRC1
|
||||
TESTQ $127, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop128b:
|
||||
VMOVDQU (SRC0)(POS*1), Y0
|
||||
VMOVDQU 32(SRC0)(POS*1), Y1
|
||||
VMOVDQU 64(SRC0)(POS*1), Y2
|
||||
VMOVDQU 96(SRC0)(POS*1), Y3
|
||||
VPXOR (SRC1)(POS*1), Y0, Y0
|
||||
VPXOR 32(SRC1)(POS*1), Y1, Y1
|
||||
VPXOR 64(SRC1)(POS*1), Y2, Y2
|
||||
VPXOR 96(SRC1)(POS*1), Y3, Y3
|
||||
VMOVDQU Y0, (DST)(POS*1)
|
||||
VMOVDQU Y1, 32(DST)(POS*1)
|
||||
VMOVDQU Y2, 64(DST)(POS*1)
|
||||
VMOVDQU Y3, 96(DST)(POS*1)
|
||||
|
||||
ADDQ $128, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop128b
|
||||
VZEROUPPER
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVB -1(SRC0)(LEN*1), TMP1
|
||||
MOVB -1(SRC1)(LEN*1), TMP2
|
||||
XORB TMP1, TMP2
|
||||
MOVB TMP2, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $127, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP1
|
||||
ANDQ $127, TMP1
|
||||
|
||||
loop_8b:
|
||||
MOVQ -8(SRC0)(LEN*1), TMP2
|
||||
MOVQ -8(SRC1)(LEN*1), TMP3
|
||||
XORQ TMP2, TMP3
|
||||
MOVQ TMP3, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP1
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $128
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
// func bytesAVX2big(dst, src0, src1 []byte, size int)
|
||||
TEXT ·bytesAVX2big(SB), NOSPLIT, $0
|
||||
MOVQ len+72(FP), LEN
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src0+24(FP), SRC0
|
||||
MOVQ src1+48(FP), SRC1
|
||||
TESTQ $127, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop128b:
|
||||
VMOVDQU (SRC0)(POS*1), Y0
|
||||
VMOVDQU 32(SRC0)(POS*1), Y1
|
||||
VMOVDQU 64(SRC0)(POS*1), Y2
|
||||
VMOVDQU 96(SRC0)(POS*1), Y3
|
||||
VPXOR (SRC1)(POS*1), Y0, Y0
|
||||
VPXOR 32(SRC1)(POS*1), Y1, Y1
|
||||
VPXOR 64(SRC1)(POS*1), Y2, Y2
|
||||
VPXOR 96(SRC1)(POS*1), Y3, Y3
|
||||
LONG $0xe77da1c4; WORD $0x0304
|
||||
LONG $0xe77da1c4; WORD $0x034c; BYTE $0x20
|
||||
LONG $0xe77da1c4; WORD $0x0354; BYTE $0x40
|
||||
LONG $0xe77da1c4; WORD $0x035c; BYTE $0x60
|
||||
|
||||
ADDQ $128, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop128b
|
||||
SFENCE
|
||||
VZEROUPPER
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVB -1(SRC0)(LEN*1), TMP1
|
||||
MOVB -1(SRC1)(LEN*1), TMP2
|
||||
XORB TMP1, TMP2
|
||||
MOVB TMP2, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $127, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP1
|
||||
ANDQ $127, TMP1
|
||||
|
||||
loop_8b:
|
||||
MOVQ -8(SRC0)(LEN*1), TMP2
|
||||
MOVQ -8(SRC1)(LEN*1), TMP3
|
||||
XORQ TMP2, TMP3
|
||||
MOVQ TMP3, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP1
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $128
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
// func matrixAVX2small(dst []byte, src [][]byte)
|
||||
TEXT ·matrixAVX2small(SB), NOSPLIT, $0
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src+24(FP), SRC
|
||||
MOVQ vec+32(FP), VECT
|
||||
MOVQ len+8(FP), LEN
|
||||
TESTQ $127, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop128b:
|
||||
MOVQ VECT, TMP1
|
||||
SUBQ $2, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ TMP3, TMP4
|
||||
VMOVDQU (TMP3)(POS*1), Y0
|
||||
VMOVDQU 32(TMP4)(POS*1), Y1
|
||||
VMOVDQU 64(TMP3)(POS*1), Y2
|
||||
VMOVDQU 96(TMP4)(POS*1), Y3
|
||||
|
||||
next_vect:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ TMP3, TMP4
|
||||
VMOVDQU (TMP3)(POS*1), Y4
|
||||
VMOVDQU 32(TMP4)(POS*1), Y5
|
||||
VMOVDQU 64(TMP3)(POS*1), Y6
|
||||
VMOVDQU 96(TMP4)(POS*1), Y7
|
||||
VPXOR Y4, Y0, Y0
|
||||
VPXOR Y5, Y1, Y1
|
||||
VPXOR Y6, Y2, Y2
|
||||
VPXOR Y7, Y3, Y3
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect
|
||||
|
||||
VMOVDQU Y0, (DST)(POS*1)
|
||||
VMOVDQU Y1, 32(DST)(POS*1)
|
||||
VMOVDQU Y2, 64(DST)(POS*1)
|
||||
VMOVDQU Y3, 96(DST)(POS*1)
|
||||
|
||||
ADDQ $128, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop128b
|
||||
VZEROUPPER
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVQ VECT, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
SUBQ $2, TMP1
|
||||
MOVB -1(TMP3)(LEN*1), TMP5
|
||||
|
||||
next_vect_1b:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVB -1(TMP3)(LEN*1), TMP6
|
||||
XORB TMP6, TMP5
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect_1b
|
||||
|
||||
MOVB TMP5, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $127, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP4
|
||||
ANDQ $127, TMP4
|
||||
|
||||
loop_8b:
|
||||
MOVQ VECT, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
SUBQ $2, TMP1
|
||||
MOVQ -8(TMP3)(LEN*1), TMP5
|
||||
|
||||
next_vect_8b:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ -8(TMP3)(LEN*1), TMP6
|
||||
XORQ TMP6, TMP5
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect_8b
|
||||
|
||||
MOVQ TMP5, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP4
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $128
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
// func matrixAVX2big(dst []byte, src [][]byte)
|
||||
TEXT ·matrixAVX2big(SB), NOSPLIT, $0
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src+24(FP), SRC
|
||||
MOVQ vec+32(FP), VECT
|
||||
MOVQ len+8(FP), LEN
|
||||
TESTQ $127, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop128b:
|
||||
MOVQ VECT, TMP1
|
||||
SUBQ $2, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ TMP3, TMP4
|
||||
VMOVDQU (TMP3)(POS*1), Y0
|
||||
VMOVDQU 32(TMP4)(POS*1), Y1
|
||||
VMOVDQU 64(TMP3)(POS*1), Y2
|
||||
VMOVDQU 96(TMP4)(POS*1), Y3
|
||||
|
||||
next_vect:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ TMP3, TMP4
|
||||
VMOVDQU (TMP3)(POS*1), Y4
|
||||
VMOVDQU 32(TMP4)(POS*1), Y5
|
||||
VMOVDQU 64(TMP3)(POS*1), Y6
|
||||
VMOVDQU 96(TMP4)(POS*1), Y7
|
||||
VPXOR Y4, Y0, Y0
|
||||
VPXOR Y5, Y1, Y1
|
||||
VPXOR Y6, Y2, Y2
|
||||
VPXOR Y7, Y3, Y3
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect
|
||||
|
||||
LONG $0xe77da1c4; WORD $0x0304 // VMOVNTDQ (byte-encoded non-temporal store)
|
||||
LONG $0xe77da1c4; WORD $0x034c; BYTE $0x20
|
||||
LONG $0xe77da1c4; WORD $0x0354; BYTE $0x40
|
||||
LONG $0xe77da1c4; WORD $0x035c; BYTE $0x60
|
||||
|
||||
ADDQ $128, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop128b
|
||||
VZEROUPPER
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVQ VECT, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
SUBQ $2, TMP1
|
||||
MOVB -1(TMP3)(LEN*1), TMP5
|
||||
|
||||
next_vect_1b:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVB -1(TMP3)(LEN*1), TMP6
|
||||
XORB TMP6, TMP5
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect_1b
|
||||
|
||||
MOVB TMP5, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $127, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP4
|
||||
ANDQ $127, TMP4
|
||||
|
||||
loop_8b:
|
||||
MOVQ VECT, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
SUBQ $2, TMP1
|
||||
MOVQ -8(TMP3)(LEN*1), TMP5
|
||||
|
||||
next_vect_8b:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ -8(TMP3)(LEN*1), TMP6
|
||||
XORQ TMP6, TMP5
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect_8b
|
||||
|
||||
MOVQ TMP5, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP4
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $128
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
116
vendor/github.com/templexxx/xor/nosimd.go
generated
vendored
Normal file
@ -0,0 +1,116 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xor
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const wordSize = int(unsafe.Sizeof(uintptr(0)))
|
||||
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
|
||||
|
||||
// xor the bytes in a and b. The destination is assumed to have enough space.
|
||||
func bytesNoSIMD(dst, a, b []byte, size int) {
|
||||
if supportsUnaligned {
|
||||
fastXORBytes(dst, a, b, size)
|
||||
} else {
|
||||
// TODO(hanwen): if (dst, a, b) have common alignment
|
||||
// we could still try fastXORBytes. It is not clear
|
||||
// how often this happens, and it's only worth it if
|
||||
// the block encryption itself is hardware
|
||||
// accelerated.
|
||||
safeXORBytes(dst, a, b, size)
|
||||
}
|
||||
}
|
||||
|
||||
// split the slice into cache-friendly units
|
||||
const unitSize = 16 * 1024
|
||||
|
||||
func matrixNoSIMD(dst []byte, src [][]byte) {
|
||||
size := len(src[0])
|
||||
start := 0
|
||||
do := unitSize
|
||||
for start < size {
|
||||
end := start + do
|
||||
if end <= size {
|
||||
partNoSIMD(start, end, dst, src)
|
||||
start = start + do
|
||||
} else {
|
||||
partNoSIMD(start, size, dst, src)
|
||||
start = size
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// splitting the vectors improves performance on big data by reducing cache pollution
|
||||
func partNoSIMD(start, end int, dst []byte, src [][]byte) {
|
||||
bytesNoSIMD(dst[start:end], src[0][start:end], src[1][start:end], end-start)
|
||||
for i := 2; i < len(src); i++ {
|
||||
bytesNoSIMD(dst[start:end], dst[start:end], src[i][start:end], end-start)
|
||||
}
|
||||
}
|
||||
|
||||
// fastXORBytes XORs in bulk. It only works on architectures that
|
||||
// support unaligned read/writes.
|
||||
func fastXORBytes(dst, a, b []byte, n int) {
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
wordBytes := w * wordSize
|
||||
fastXORWords(dst[:wordBytes], a[:wordBytes], b[:wordBytes])
|
||||
}
|
||||
for i := n - n%wordSize; i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
}
|
||||
|
||||
func safeXORBytes(dst, a, b []byte, n int) {
|
||||
ex := n % 8
|
||||
for i := 0; i < ex; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
|
||||
for i := ex; i < n; i += 8 {
|
||||
_dst := dst[i : i+8]
|
||||
_a := a[i : i+8]
|
||||
_b := b[i : i+8]
|
||||
_dst[0] = _a[0] ^ _b[0]
|
||||
_dst[1] = _a[1] ^ _b[1]
|
||||
_dst[2] = _a[2] ^ _b[2]
|
||||
_dst[3] = _a[3] ^ _b[3]
|
||||
|
||||
_dst[4] = _a[4] ^ _b[4]
|
||||
_dst[5] = _a[5] ^ _b[5]
|
||||
_dst[6] = _a[6] ^ _b[6]
|
||||
_dst[7] = _a[7] ^ _b[7]
|
||||
}
|
||||
}
|
||||
|
||||
// fastXORWords XORs multiples of 4 or 8 bytes (depending on architecture.)
|
||||
// The arguments are assumed to be of equal length.
|
||||
func fastXORWords(dst, a, b []byte) {
|
||||
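	// Reinterpret the byte slices as []uintptr so each XOR below handles one machine word.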
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
n := len(b) / wordSize
|
||||
ex := n % 8
|
||||
for i := 0; i < ex; i++ {
|
||||
dw[i] = aw[i] ^ bw[i]
|
||||
}
|
||||
|
||||
for i := ex; i < n; i += 8 {
|
||||
_dw := dw[i : i+8]
|
||||
_aw := aw[i : i+8]
|
||||
_bw := bw[i : i+8]
|
||||
_dw[0] = _aw[0] ^ _bw[0]
|
||||
_dw[1] = _aw[1] ^ _bw[1]
|
||||
_dw[2] = _aw[2] ^ _bw[2]
|
||||
_dw[3] = _aw[3] ^ _bw[3]
|
||||
_dw[4] = _aw[4] ^ _bw[4]
|
||||
_dw[5] = _aw[5] ^ _bw[5]
|
||||
_dw[6] = _aw[6] ^ _bw[6]
|
||||
_dw[7] = _aw[7] ^ _bw[7]
|
||||
}
|
||||
}
|
||||
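As a quick illustration of how the two paths above relate, here is a minimal, hypothetical test (not part of the vendored package; the file name and sizes are made up) checking that fastXORBytes and safeXORBytes produce identical output:

// nosimd_equiv_test.go — hypothetical sanity check, not in the vendored source.
package xor

import (
	"bytes"
	"math/rand"
	"testing"
)

func TestFastVsSafeXOR(t *testing.T) {
	for _, n := range []int{1, 7, 8, 15, 64, 1000} {
		a, b := make([]byte, n), make([]byte, n)
		rand.Read(a)
		rand.Read(b)
		want, got := make([]byte, n), make([]byte, n)
		safeXORBytes(want, a, b, n)
		fastXORBytes(got, a, b, n)
		if !bytes.Equal(want, got) {
			t.Fatalf("fast/safe mismatch at n=%d", n)
		}
	}
}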
574
vendor/github.com/templexxx/xor/sse2_amd64.s
generated
vendored
Normal file
@ -0,0 +1,574 @@
|
||||
#include "textflag.h"
|
||||
|
||||
// addr of mem
|
||||
#define DST BX
|
||||
#define SRC SI
|
||||
#define SRC0 TMP4
|
||||
#define SRC1 TMP5
|
||||
|
||||
// loop args
|
||||
// num of vect
|
||||
#define VECT CX
|
||||
#define LEN DX
|
||||
// pos of matrix
|
||||
#define POS R8
|
||||
|
||||
// tmp store
|
||||
// num of vect or ...
|
||||
#define TMP1 R9
|
||||
// pos of matrix or ...
|
||||
#define TMP2 R10
|
||||
// store addr of data/parity or ...
|
||||
#define TMP3 R11
|
||||
#define TMP4 R12
|
||||
#define TMP5 R13
|
||||
#define TMP6 R14
|
||||
|
||||
// func xorSrc0(dst, src0, src1 []byte)
|
||||
TEXT ·xorSrc0(SB), NOSPLIT, $0
|
||||
MOVQ len+32(FP), LEN
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src0+24(FP), SRC0
|
||||
MOVQ src1+48(FP), SRC1
|
||||
TESTQ $15, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop16b:
|
||||
MOVOU (SRC0)(POS*1), X0
|
||||
XORPD (SRC1)(POS*1), X0
|
||||
MOVOU X0, (DST)(POS*1)
|
||||
ADDQ $16, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop16b
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVB -1(SRC0)(LEN*1), TMP1
|
||||
MOVB -1(SRC1)(LEN*1), TMP2
|
||||
XORB TMP1, TMP2
|
||||
MOVB TMP2, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $15, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP1
|
||||
ANDQ $15, TMP1
|
||||
|
||||
loop_8b:
|
||||
MOVQ -8(SRC0)(LEN*1), TMP2
|
||||
MOVQ -8(SRC1)(LEN*1), TMP3
|
||||
XORQ TMP2, TMP3
|
||||
MOVQ TMP3, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP1
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $16
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
// func xorSrc1(dst, src0, src1 []byte)
|
||||
TEXT ·xorSrc1(SB), NOSPLIT, $0
|
||||
MOVQ len+56(FP), LEN
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src0+24(FP), SRC0
|
||||
MOVQ src1+48(FP), SRC1
|
||||
TESTQ $15, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop16b:
|
||||
MOVOU (SRC0)(POS*1), X0
|
||||
XORPD (SRC1)(POS*1), X0
|
||||
MOVOU X0, (DST)(POS*1)
|
||||
ADDQ $16, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop16b
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVB -1(SRC0)(LEN*1), TMP1
|
||||
MOVB -1(SRC1)(LEN*1), TMP2
|
||||
XORB TMP1, TMP2
|
||||
MOVB TMP2, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $15, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP1
|
||||
ANDQ $15, TMP1
|
||||
|
||||
loop_8b:
|
||||
MOVQ -8(SRC0)(LEN*1), TMP2
|
||||
MOVQ -8(SRC1)(LEN*1), TMP3
|
||||
XORQ TMP2, TMP3
|
||||
MOVQ TMP3, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP1
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $16
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
// func bytesSSE2mini(dst, src0, src1 []byte, size int)
|
||||
TEXT ·bytesSSE2mini(SB), NOSPLIT, $0
|
||||
MOVQ len+72(FP), LEN
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src0+24(FP), SRC0
|
||||
MOVQ src1+48(FP), SRC1
|
||||
TESTQ $15, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop16b:
|
||||
MOVOU (SRC0)(POS*1), X0
|
||||
XORPD (SRC1)(POS*1), X0
|
||||
|
||||
// MOVOU (SRC1)(POS*1), X4
|
||||
// PXOR X4, X0
|
||||
MOVOU X0, (DST)(POS*1)
|
||||
ADDQ $16, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop16b
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVB -1(SRC0)(LEN*1), TMP1
|
||||
MOVB -1(SRC1)(LEN*1), TMP2
|
||||
XORB TMP1, TMP2
|
||||
MOVB TMP2, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $15, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP1
|
||||
ANDQ $15, TMP1
|
||||
|
||||
loop_8b:
|
||||
MOVQ -8(SRC0)(LEN*1), TMP2
|
||||
MOVQ -8(SRC1)(LEN*1), TMP3
|
||||
XORQ TMP2, TMP3
|
||||
MOVQ TMP3, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP1
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $16
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
// func bytesSSE2small(dst, src0, src1 []byte, size int)
|
||||
TEXT ·bytesSSE2small(SB), NOSPLIT, $0
|
||||
MOVQ len+72(FP), LEN
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src0+24(FP), SRC0
|
||||
MOVQ src1+48(FP), SRC1
|
||||
TESTQ $63, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop64b:
|
||||
MOVOU (SRC0)(POS*1), X0
|
||||
MOVOU 16(SRC0)(POS*1), X1
|
||||
MOVOU 32(SRC0)(POS*1), X2
|
||||
MOVOU 48(SRC0)(POS*1), X3
|
||||
|
||||
MOVOU (SRC1)(POS*1), X4
|
||||
MOVOU 16(SRC1)(POS*1), X5
|
||||
MOVOU 32(SRC1)(POS*1), X6
|
||||
MOVOU 48(SRC1)(POS*1), X7
|
||||
|
||||
PXOR X4, X0
|
||||
PXOR X5, X1
|
||||
PXOR X6, X2
|
||||
PXOR X7, X3
|
||||
|
||||
MOVOU X0, (DST)(POS*1)
|
||||
MOVOU X1, 16(DST)(POS*1)
|
||||
MOVOU X2, 32(DST)(POS*1)
|
||||
MOVOU X3, 48(DST)(POS*1)
|
||||
|
||||
ADDQ $64, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop64b
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVB -1(SRC0)(LEN*1), TMP1
|
||||
MOVB -1(SRC1)(LEN*1), TMP2
|
||||
XORB TMP1, TMP2
|
||||
MOVB TMP2, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $63, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP1
|
||||
ANDQ $63, TMP1
|
||||
|
||||
loop_8b:
|
||||
MOVQ -8(SRC0)(LEN*1), TMP2
|
||||
MOVQ -8(SRC1)(LEN*1), TMP3
|
||||
XORQ TMP2, TMP3
|
||||
MOVQ TMP3, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP1
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $64
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
// func bytesSSE2big(dst, src0, src1 []byte, size int)
|
||||
TEXT ·bytesSSE2big(SB), NOSPLIT, $0
|
||||
MOVQ len+72(FP), LEN
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src0+24(FP), SRC0
|
||||
MOVQ src1+48(FP), SRC1
|
||||
TESTQ $63, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop64b:
|
||||
MOVOU (SRC0)(POS*1), X0
|
||||
MOVOU 16(SRC0)(POS*1), X1
|
||||
MOVOU 32(SRC0)(POS*1), X2
|
||||
MOVOU 48(SRC0)(POS*1), X3
|
||||
|
||||
MOVOU (SRC1)(POS*1), X4
|
||||
MOVOU 16(SRC1)(POS*1), X5
|
||||
MOVOU 32(SRC1)(POS*1), X6
|
||||
MOVOU 48(SRC1)(POS*1), X7
|
||||
|
||||
PXOR X4, X0
|
||||
PXOR X5, X1
|
||||
PXOR X6, X2
|
||||
PXOR X7, X3
|
||||
|
||||
LONG $0xe70f4266; WORD $0x0304 // MOVNTDQ
|
||||
LONG $0xe70f4266; WORD $0x034c; BYTE $0x10
|
||||
LONG $0xe70f4266; WORD $0x0354; BYTE $0x20
|
||||
LONG $0xe70f4266; WORD $0x035c; BYTE $0x30
|
||||
|
||||
ADDQ $64, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop64b
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVB -1(SRC0)(LEN*1), TMP1
|
||||
MOVB -1(SRC1)(LEN*1), TMP2
|
||||
XORB TMP1, TMP2
|
||||
MOVB TMP2, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $63, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP1
|
||||
ANDQ $63, TMP1
|
||||
|
||||
loop_8b:
|
||||
MOVQ -8(SRC0)(LEN*1), TMP2
|
||||
MOVQ -8(SRC1)(LEN*1), TMP3
|
||||
XORQ TMP2, TMP3
|
||||
MOVQ TMP3, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP1
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $64
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
// func matrixSSE2small(dst []byte, src [][]byte)
|
||||
TEXT ·matrixSSE2small(SB), NOSPLIT, $0
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src+24(FP), SRC
|
||||
MOVQ vec+32(FP), VECT
|
||||
MOVQ len+8(FP), LEN
|
||||
TESTQ $63, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop64b:
|
||||
MOVQ VECT, TMP1
|
||||
SUBQ $2, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ TMP3, TMP4
|
||||
MOVOU (TMP3)(POS*1), X0
|
||||
MOVOU 16(TMP4)(POS*1), X1
|
||||
MOVOU 32(TMP3)(POS*1), X2
|
||||
MOVOU 48(TMP4)(POS*1), X3
|
||||
|
||||
next_vect:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ TMP3, TMP4
|
||||
MOVOU (TMP3)(POS*1), X4
|
||||
MOVOU 16(TMP4)(POS*1), X5
|
||||
MOVOU 32(TMP3)(POS*1), X6
|
||||
MOVOU 48(TMP4)(POS*1), X7
|
||||
PXOR X4, X0
|
||||
PXOR X5, X1
|
||||
PXOR X6, X2
|
||||
PXOR X7, X3
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect
|
||||
|
||||
MOVOU X0, (DST)(POS*1)
|
||||
MOVOU X1, 16(DST)(POS*1)
|
||||
MOVOU X2, 32(DST)(POS*1)
|
||||
MOVOU X3, 48(DST)(POS*1)
|
||||
|
||||
ADDQ $64, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop64b
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVQ VECT, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
SUBQ $2, TMP1
|
||||
MOVB -1(TMP3)(LEN*1), TMP5
|
||||
|
||||
next_vect_1b:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVB -1(TMP3)(LEN*1), TMP6
|
||||
XORB TMP6, TMP5
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect_1b
|
||||
|
||||
MOVB TMP5, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $63, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP4
|
||||
ANDQ $63, TMP4
|
||||
|
||||
loop_8b:
|
||||
MOVQ VECT, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
SUBQ $2, TMP1
|
||||
MOVQ -8(TMP3)(LEN*1), TMP5
|
||||
|
||||
next_vect_8b:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ -8(TMP3)(LEN*1), TMP6
|
||||
XORQ TMP6, TMP5
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect_8b
|
||||
|
||||
MOVQ TMP5, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP4
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $64
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
// func matrixSSE2big(dst []byte, src [][]byte)
|
||||
TEXT ·matrixSSE2big(SB), NOSPLIT, $0
|
||||
MOVQ dst+0(FP), DST
|
||||
MOVQ src+24(FP), SRC
|
||||
MOVQ vec+32(FP), VECT
|
||||
MOVQ len+8(FP), LEN
|
||||
TESTQ $63, LEN
|
||||
JNZ not_aligned
|
||||
|
||||
aligned:
|
||||
MOVQ $0, POS
|
||||
|
||||
loop64b:
|
||||
MOVQ VECT, TMP1
|
||||
SUBQ $2, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ TMP3, TMP4
|
||||
MOVOU (TMP3)(POS*1), X0
|
||||
MOVOU 16(TMP4)(POS*1), X1
|
||||
MOVOU 32(TMP3)(POS*1), X2
|
||||
MOVOU 48(TMP4)(POS*1), X3
|
||||
|
||||
next_vect:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ TMP3, TMP4
|
||||
MOVOU (TMP3)(POS*1), X4
|
||||
MOVOU 16(TMP4)(POS*1), X5
|
||||
MOVOU 32(TMP3)(POS*1), X6
|
||||
MOVOU 48(TMP4)(POS*1), X7
|
||||
PXOR X4, X0
|
||||
PXOR X5, X1
|
||||
PXOR X6, X2
|
||||
PXOR X7, X3
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect
|
||||
|
||||
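// Byte-encoded MOVNTDQ (non-temporal) stores of X0-X3 to (DST)(POS*1) through 48(DST)(POS*1).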
LONG $0xe70f4266; WORD $0x0304
|
||||
LONG $0xe70f4266; WORD $0x034c; BYTE $0x10
|
||||
LONG $0xe70f4266; WORD $0x0354; BYTE $0x20
|
||||
LONG $0xe70f4266; WORD $0x035c; BYTE $0x30
|
||||
|
||||
ADDQ $64, POS
|
||||
CMPQ LEN, POS
|
||||
JNE loop64b
|
||||
RET
|
||||
|
||||
loop_1b:
|
||||
MOVQ VECT, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
SUBQ $2, TMP1
|
||||
MOVB -1(TMP3)(LEN*1), TMP5
|
||||
|
||||
next_vect_1b:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVB -1(TMP3)(LEN*1), TMP6
|
||||
XORB TMP6, TMP5
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect_1b
|
||||
|
||||
MOVB TMP5, -1(DST)(LEN*1)
|
||||
SUBQ $1, LEN
|
||||
TESTQ $7, LEN
|
||||
JNZ loop_1b
|
||||
|
||||
CMPQ LEN, $0
|
||||
JE ret
|
||||
TESTQ $63, LEN
|
||||
JZ aligned
|
||||
|
||||
not_aligned:
|
||||
TESTQ $7, LEN
|
||||
JNE loop_1b
|
||||
MOVQ LEN, TMP4
|
||||
ANDQ $63, TMP4
|
||||
|
||||
loop_8b:
|
||||
MOVQ VECT, TMP1
|
||||
MOVQ $0, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
SUBQ $2, TMP1
|
||||
MOVQ -8(TMP3)(LEN*1), TMP5
|
||||
|
||||
next_vect_8b:
|
||||
ADDQ $24, TMP2
|
||||
MOVQ (SRC)(TMP2*1), TMP3
|
||||
MOVQ -8(TMP3)(LEN*1), TMP6
|
||||
XORQ TMP6, TMP5
|
||||
SUBQ $1, TMP1
|
||||
JGE next_vect_8b
|
||||
|
||||
MOVQ TMP5, -8(DST)(LEN*1)
|
||||
SUBQ $8, LEN
|
||||
SUBQ $8, TMP4
|
||||
JG loop_8b
|
||||
|
||||
CMPQ LEN, $64
|
||||
JGE aligned
|
||||
RET
|
||||
|
||||
ret:
|
||||
RET
|
||||
|
||||
TEXT ·hasSSE2(SB), NOSPLIT, $0
|
||||
XORQ AX, AX
|
||||
INCL AX
|
||||
CPUID
|
||||
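// With EAX=1, CPUID returns feature flags in EDX; bit 26 indicates SSE2.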
SHRQ $26, DX
|
||||
ANDQ $1, DX
|
||||
MOVB DX, ret+0(FP)
|
||||
RET
|
||||
|
||||
49
vendor/github.com/templexxx/xor/xor.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
package xor

// SIMD Extensions
const (
	none = iota
	avx2
	// SSE2 was first introduced by Intel with the initial version of the Pentium 4 in 2001,
	// so we can assume every amd64 CPU has it.
	sse2
)

var extension = none

// Bytes : chooses the shortest slice length as the xor size.
// It is best suited to big data (> 64 bytes).
func Bytes(dst, src0, src1 []byte) {
	size := len(dst)
	if size > len(src0) {
		size = len(src0)
	}
	if size > len(src1) {
		size = len(src1)
	}
	xorBytes(dst, src0, src1, size)
}

// BytesSameLen : all slices' lengths must be equal.
// It skips the size checks, saving time for small data.
func BytesSameLen(dst, src0, src1 []byte) {
	xorSrc1(dst, src0, src1)
}

// BytesSrc0 : len(src1) >= len(src0), len(dst) >= len(src0);
// xors src0's len bytes.
func BytesSrc0(dst, src0, src1 []byte) {
	xorSrc0(dst, src0, src1)
}

// BytesSrc1 : len(src0) >= len(src1), len(dst) >= len(src1);
// xors src1's len bytes.
func BytesSrc1(dst, src0, src1 []byte) {
	xorSrc1(dst, src0, src1)
}

// Matrix : all slices' lengths must be equal and non-zero;
// len(src) must be >= 2.
func Matrix(dst []byte, src [][]byte) {
	xorMatrix(dst, src)
}
||||
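For orientation, a minimal usage sketch of this package's exported API (the byte values and expected outputs are illustrative only):

package main

import (
	"fmt"

	"github.com/templexxx/xor"
)

func main() {
	a := []byte{0x0f, 0xf0, 0xaa}
	b := []byte{0xff, 0x0f, 0x55}
	dst := make([]byte, 3)

	// dst = a ^ b, byte by byte; the shortest slice decides the size.
	xor.Bytes(dst, a, b)
	fmt.Printf("%x\n", dst) // f0ffff

	// XOR several equal-length vectors together into dst.
	rows := [][]byte{a, b, {0x01, 0x02, 0x03}}
	xor.Matrix(dst, rows)
	fmt.Printf("%x\n", dst) // f1fdfc
}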
120
vendor/github.com/templexxx/xor/xor_amd64.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
|
||||
package xor
|
||||
|
||||
import "github.com/templexxx/cpufeat"
|
||||
|
||||
func init() {
|
||||
getEXT()
|
||||
}
|
||||
|
||||
func getEXT() {
|
||||
if cpufeat.X86.HasAVX2 {
|
||||
extension = avx2
|
||||
} else {
|
||||
extension = sse2
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func xorBytes(dst, src0, src1 []byte, size int) {
|
||||
switch extension {
|
||||
case avx2:
|
||||
bytesAVX2(dst, src0, src1, size)
|
||||
default:
|
||||
bytesSSE2(dst, src0, src1, size)
|
||||
}
|
||||
}
|
||||
|
||||
// size threshold above which non-temporal (streaming) stores are used
|
||||
const nontmp = 8 * 1024
|
||||
const avx2loopsize = 128
|
||||
|
||||
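// bytesAVX2 picks a kernel by size: the mini path below one 128-byte loop
// iteration, the small path up to the non-temporal threshold, and the big
// path (streaming stores) beyond it.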
func bytesAVX2(dst, src0, src1 []byte, size int) {
|
||||
if size < avx2loopsize {
|
||||
bytesAVX2mini(dst, src0, src1, size)
|
||||
} else if size >= avx2loopsize && size <= nontmp {
|
||||
bytesAVX2small(dst, src0, src1, size)
|
||||
} else {
|
||||
bytesAVX2big(dst, src0, src1, size)
|
||||
}
|
||||
}
|
||||
|
||||
const sse2loopsize = 64
|
||||
|
||||
func bytesSSE2(dst, src0, src1 []byte, size int) {
|
||||
if size < sse2loopsize {
|
||||
bytesSSE2mini(dst, src0, src1, size)
|
||||
} else if size >= sse2loopsize && size <= nontmp {
|
||||
bytesSSE2small(dst, src0, src1, size)
|
||||
} else {
|
||||
bytesSSE2big(dst, src0, src1, size)
|
||||
}
|
||||
}
|
||||
|
||||
func xorMatrix(dst []byte, src [][]byte) {
|
||||
switch extension {
|
||||
case avx2:
|
||||
matrixAVX2(dst, src)
|
||||
default:
|
||||
matrixSSE2(dst, src)
|
||||
}
|
||||
}
|
||||
|
||||
func matrixAVX2(dst []byte, src [][]byte) {
|
||||
size := len(dst)
|
||||
if size > nontmp {
|
||||
matrixAVX2big(dst, src)
|
||||
} else {
|
||||
matrixAVX2small(dst, src)
|
||||
}
|
||||
}
|
||||
|
||||
func matrixSSE2(dst []byte, src [][]byte) {
|
||||
size := len(dst)
|
||||
if size > nontmp {
|
||||
matrixSSE2big(dst, src)
|
||||
} else {
|
||||
matrixSSE2small(dst, src)
|
||||
}
|
||||
}
|
||||
|
||||
//go:noescape
|
||||
func xorSrc0(dst, src0, src1 []byte)
|
||||
|
||||
//go:noescape
|
||||
func xorSrc1(dst, src0, src1 []byte)
|
||||
|
||||
//go:noescape
|
||||
func bytesAVX2mini(dst, src0, src1 []byte, size int)
|
||||
|
||||
//go:noescape
|
||||
func bytesAVX2big(dst, src0, src1 []byte, size int)
|
||||
|
||||
//go:noescape
|
||||
func bytesAVX2small(dst, src0, src1 []byte, size int)
|
||||
|
||||
//go:noescape
|
||||
func bytesSSE2mini(dst, src0, src1 []byte, size int)
|
||||
|
||||
//go:noescape
|
||||
func bytesSSE2small(dst, src0, src1 []byte, size int)
|
||||
|
||||
//go:noescape
|
||||
func bytesSSE2big(dst, src0, src1 []byte, size int)
|
||||
|
||||
//go:noescape
|
||||
func matrixAVX2small(dst []byte, src [][]byte)
|
||||
|
||||
//go:noescape
|
||||
func matrixAVX2big(dst []byte, src [][]byte)
|
||||
|
||||
//go:noescape
|
||||
func matrixSSE2small(dst []byte, src [][]byte)
|
||||
|
||||
//go:noescape
|
||||
func matrixSSE2big(dst []byte, src [][]byte)
|
||||
|
||||
//go:noescape
|
||||
func hasAVX2() bool
|
||||
|
||||
//go:noescape
|
||||
func hasSSE2() bool
|
||||
19
vendor/github.com/templexxx/xor/xor_other.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
// +build !amd64 noasm
|
||||
|
||||
package xor
|
||||
|
||||
func xorBytes(dst, src0, src1 []byte, size int) {
|
||||
bytesNoSIMD(dst, src0, src1, size)
|
||||
}
|
||||
|
||||
func xorMatrix(dst []byte, src [][]byte) {
|
||||
matrixNoSIMD(dst, src)
|
||||
}
|
||||
|
||||
func xorSrc0(dst, src0, src1 []byte) {
|
||||
bytesNoSIMD(dst, src0, src1, len(src0))
|
||||
}
|
||||
|
||||
func xorSrc1(dst, src0, src1 []byte) {
|
||||
bytesNoSIMD(dst, src0, src1, len(src1))
|
||||
}
|
||||
201
vendor/github.com/tjfoc/gmsm/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
342
vendor/github.com/tjfoc/gmsm/sm4/sm4.go
generated
vendored
Normal file
@ -0,0 +1,342 @@
|
||||
/*
|
||||
Copyright Suzhou Tongji Fintech Research Institute 2017 All Rights Reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
SM4 acceleration
|
||||
modified by Jack, 2017 Oct
|
||||
*/
|
||||
|
||||
package sm4
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const BlockSize = 16
|
||||
|
||||
type SM4Key []byte
|
||||
|
||||
type KeySizeError int
|
||||
|
||||
// Sm4Cipher is an instance of SM4 encryption.
|
||||
type Sm4Cipher struct {
|
||||
subkeys []uint32
|
||||
block1 []uint32
|
||||
block2 []byte
|
||||
}
|
||||
|
||||
// SM4 key schedule system parameter FK
|
||||
var fk = [4]uint32{
|
||||
0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc,
|
||||
}
|
||||
|
||||
// SM4 key schedule constants CK
|
||||
var ck = [32]uint32{
|
||||
0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
|
||||
0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
|
||||
0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
|
||||
0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
|
||||
0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
|
||||
0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
|
||||
0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
|
||||
0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279,
|
||||
}
|
||||
|
||||
// SM4 S-box
|
||||
var sbox = [256]uint8{
|
||||
0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
|
||||
0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
|
||||
0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
|
||||
0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
|
||||
0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
|
||||
0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
|
||||
0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
|
||||
0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
|
||||
0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
|
||||
0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
|
||||
0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
|
||||
0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
|
||||
0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
|
||||
0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
|
||||
0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
|
||||
0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48,
|
||||
}
|
||||
|
||||
var sbox0 = [256]uint32{
|
||||
0xd55b5b8e, 0x924242d0, 0xeaa7a74d, 0xfdfbfb06, 0xcf3333fc, 0xe2878765, 0x3df4f4c9, 0xb5dede6b, 0x1658584e, 0xb4dada6e, 0x14505044, 0xc10b0bca, 0x28a0a088, 0xf8efef17, 0x2cb0b09c, 0x05141411,
|
||||
0x2bacac87, 0x669d9dfb, 0x986a6af2, 0x77d9d9ae, 0x2aa8a882, 0xbcfafa46, 0x04101014, 0xc00f0fcf, 0xa8aaaa02, 0x45111154, 0x134c4c5f, 0x269898be, 0x4825256d, 0x841a1a9e, 0x0618181e, 0x9b6666fd,
|
||||
0x9e7272ec, 0x4309094a, 0x51414110, 0xf7d3d324, 0x934646d5, 0xecbfbf53, 0x9a6262f8, 0x7be9e992, 0x33ccccff, 0x55515104, 0x0b2c2c27, 0x420d0d4f, 0xeeb7b759, 0xcc3f3ff3, 0xaeb2b21c, 0x638989ea,
|
||||
0xe7939374, 0xb1cece7f, 0x1c70706c, 0xaba6a60d, 0xca2727ed, 0x08202028, 0xeba3a348, 0x975656c1, 0x82020280, 0xdc7f7fa3, 0x965252c4, 0xf9ebeb12, 0x74d5d5a1, 0x8d3e3eb3, 0x3ffcfcc3, 0xa49a9a3e,
|
||||
0x461d1d5b, 0x071c1c1b, 0xa59e9e3b, 0xfff3f30c, 0xf0cfcf3f, 0x72cdcdbf, 0x175c5c4b, 0xb8eaea52, 0x810e0e8f, 0x5865653d, 0x3cf0f0cc, 0x1964647d, 0xe59b9b7e, 0x87161691, 0x4e3d3d73, 0xaaa2a208,
|
||||
0x69a1a1c8, 0x6aadadc7, 0x83060685, 0xb0caca7a, 0x70c5c5b5, 0x659191f4, 0xd96b6bb2, 0x892e2ea7, 0xfbe3e318, 0xe8afaf47, 0x0f3c3c33, 0x4a2d2d67, 0x71c1c1b0, 0x5759590e, 0x9f7676e9, 0x35d4d4e1,
|
||||
0x1e787866, 0x249090b4, 0x0e383836, 0x5f797926, 0x628d8def, 0x59616138, 0xd2474795, 0xa08a8a2a, 0x259494b1, 0x228888aa, 0x7df1f18c, 0x3bececd7, 0x01040405, 0x218484a5, 0x79e1e198, 0x851e1e9b,
|
||||
0xd7535384, 0x00000000, 0x4719195e, 0x565d5d0b, 0x9d7e7ee3, 0xd04f4f9f, 0x279c9cbb, 0x5349491a, 0x4d31317c, 0x36d8d8ee, 0x0208080a, 0xe49f9f7b, 0xa2828220, 0xc71313d4, 0xcb2323e8, 0x9c7a7ae6,
|
||||
0xe9abab42, 0xbdfefe43, 0x882a2aa2, 0xd14b4b9a, 0x41010140, 0xc41f1fdb, 0x38e0e0d8, 0xb7d6d661, 0xa18e8e2f, 0xf4dfdf2b, 0xf1cbcb3a, 0xcd3b3bf6, 0xfae7e71d, 0x608585e5, 0x15545441, 0xa3868625,
|
||||
0xe3838360, 0xacbaba16, 0x5c757529, 0xa6929234, 0x996e6ef7, 0x34d0d0e4, 0x1a686872, 0x54555501, 0xafb6b619, 0x914e4edf, 0x32c8c8fa, 0x30c0c0f0, 0xf6d7d721, 0x8e3232bc, 0xb3c6c675, 0xe08f8f6f,
|
||||
0x1d747469, 0xf5dbdb2e, 0xe18b8b6a, 0x2eb8b896, 0x800a0a8a, 0x679999fe, 0xc92b2be2, 0x618181e0, 0xc30303c0, 0x29a4a48d, 0x238c8caf, 0xa9aeae07, 0x0d343439, 0x524d4d1f, 0x4f393976, 0x6ebdbdd3,
|
||||
0xd6575781, 0xd86f6fb7, 0x37dcdceb, 0x44151551, 0xdd7b7ba6, 0xfef7f709, 0x8c3a3ab6, 0x2fbcbc93, 0x030c0c0f, 0xfcffff03, 0x6ba9a9c2, 0x73c9c9ba, 0x6cb5b5d9, 0x6db1b1dc, 0x5a6d6d37, 0x50454515,
|
||||
0x8f3636b9, 0x1b6c6c77, 0xadbebe13, 0x904a4ada, 0xb9eeee57, 0xde7777a9, 0xbef2f24c, 0x7efdfd83, 0x11444455, 0xda6767bd, 0x5d71712c, 0x40050545, 0x1f7c7c63, 0x10404050, 0x5b696932, 0xdb6363b8,
|
||||
0x0a282822, 0xc20707c5, 0x31c4c4f5, 0x8a2222a8, 0xa7969631, 0xce3737f9, 0x7aeded97, 0xbff6f649, 0x2db4b499, 0x75d1d1a4, 0xd3434390, 0x1248485a, 0xbae2e258, 0xe6979771, 0xb6d2d264, 0xb2c2c270,
|
||||
0x8b2626ad, 0x68a5a5cd, 0x955e5ecb, 0x4b292962, 0x0c30303c, 0x945a5ace, 0x76ddddab, 0x7ff9f986, 0x649595f1, 0xbbe6e65d, 0xf2c7c735, 0x0924242d, 0xc61717d1, 0x6fb9b9d6, 0xc51b1bde, 0x86121294,
|
||||
0x18606078, 0xf3c3c330, 0x7cf5f589, 0xefb3b35c, 0x3ae8e8d2, 0xdf7373ac, 0x4c353579, 0x208080a0, 0x78e5e59d, 0xedbbbb56, 0x5e7d7d23, 0x3ef8f8c6, 0xd45f5f8b, 0xc82f2fe7, 0x39e4e4dd, 0x49212168,
|
||||
}
|
||||
|
||||
var sbox1 = [256]uint32{
|
||||
0x5b5b8ed5, 0x4242d092, 0xa7a74dea, 0xfbfb06fd, 0x3333fccf, 0x878765e2, 0xf4f4c93d, 0xdede6bb5, 0x58584e16, 0xdada6eb4, 0x50504414, 0x0b0bcac1, 0xa0a08828, 0xefef17f8, 0xb0b09c2c, 0x14141105,
|
||||
0xacac872b, 0x9d9dfb66, 0x6a6af298, 0xd9d9ae77, 0xa8a8822a, 0xfafa46bc, 0x10101404, 0x0f0fcfc0, 0xaaaa02a8, 0x11115445, 0x4c4c5f13, 0x9898be26, 0x25256d48, 0x1a1a9e84, 0x18181e06, 0x6666fd9b,
|
||||
0x7272ec9e, 0x09094a43, 0x41411051, 0xd3d324f7, 0x4646d593, 0xbfbf53ec, 0x6262f89a, 0xe9e9927b, 0xccccff33, 0x51510455, 0x2c2c270b, 0x0d0d4f42, 0xb7b759ee, 0x3f3ff3cc, 0xb2b21cae, 0x8989ea63,
|
||||
0x939374e7, 0xcece7fb1, 0x70706c1c, 0xa6a60dab, 0x2727edca, 0x20202808, 0xa3a348eb, 0x5656c197, 0x02028082, 0x7f7fa3dc, 0x5252c496, 0xebeb12f9, 0xd5d5a174, 0x3e3eb38d, 0xfcfcc33f, 0x9a9a3ea4,
|
||||
0x1d1d5b46, 0x1c1c1b07, 0x9e9e3ba5, 0xf3f30cff, 0xcfcf3ff0, 0xcdcdbf72, 0x5c5c4b17, 0xeaea52b8, 0x0e0e8f81, 0x65653d58, 0xf0f0cc3c, 0x64647d19, 0x9b9b7ee5, 0x16169187, 0x3d3d734e, 0xa2a208aa,
|
||||
0xa1a1c869, 0xadadc76a, 0x06068583, 0xcaca7ab0, 0xc5c5b570, 0x9191f465, 0x6b6bb2d9, 0x2e2ea789, 0xe3e318fb, 0xafaf47e8, 0x3c3c330f, 0x2d2d674a, 0xc1c1b071, 0x59590e57, 0x7676e99f, 0xd4d4e135,
|
||||
0x7878661e, 0x9090b424, 0x3838360e, 0x7979265f, 0x8d8def62, 0x61613859, 0x474795d2, 0x8a8a2aa0, 0x9494b125, 0x8888aa22, 0xf1f18c7d, 0xececd73b, 0x04040501, 0x8484a521, 0xe1e19879, 0x1e1e9b85,
|
||||
0x535384d7, 0x00000000, 0x19195e47, 0x5d5d0b56, 0x7e7ee39d, 0x4f4f9fd0, 0x9c9cbb27, 0x49491a53, 0x31317c4d, 0xd8d8ee36, 0x08080a02, 0x9f9f7be4, 0x828220a2, 0x1313d4c7, 0x2323e8cb, 0x7a7ae69c,
|
||||
0xabab42e9, 0xfefe43bd, 0x2a2aa288, 0x4b4b9ad1, 0x01014041, 0x1f1fdbc4, 0xe0e0d838, 0xd6d661b7, 0x8e8e2fa1, 0xdfdf2bf4, 0xcbcb3af1, 0x3b3bf6cd, 0xe7e71dfa, 0x8585e560, 0x54544115, 0x868625a3,
|
||||
0x838360e3, 0xbaba16ac, 0x7575295c, 0x929234a6, 0x6e6ef799, 0xd0d0e434, 0x6868721a, 0x55550154, 0xb6b619af, 0x4e4edf91, 0xc8c8fa32, 0xc0c0f030, 0xd7d721f6, 0x3232bc8e, 0xc6c675b3, 0x8f8f6fe0,
|
||||
0x7474691d, 0xdbdb2ef5, 0x8b8b6ae1, 0xb8b8962e, 0x0a0a8a80, 0x9999fe67, 0x2b2be2c9, 0x8181e061, 0x0303c0c3, 0xa4a48d29, 0x8c8caf23, 0xaeae07a9, 0x3434390d, 0x4d4d1f52, 0x3939764f, 0xbdbdd36e,
|
||||
0x575781d6, 0x6f6fb7d8, 0xdcdceb37, 0x15155144, 0x7b7ba6dd, 0xf7f709fe, 0x3a3ab68c, 0xbcbc932f, 0x0c0c0f03, 0xffff03fc, 0xa9a9c26b, 0xc9c9ba73, 0xb5b5d96c, 0xb1b1dc6d, 0x6d6d375a, 0x45451550,
|
||||
0x3636b98f, 0x6c6c771b, 0xbebe13ad, 0x4a4ada90, 0xeeee57b9, 0x7777a9de, 0xf2f24cbe, 0xfdfd837e, 0x44445511, 0x6767bdda, 0x71712c5d, 0x05054540, 0x7c7c631f, 0x40405010, 0x6969325b, 0x6363b8db,
|
||||
0x2828220a, 0x0707c5c2, 0xc4c4f531, 0x2222a88a, 0x969631a7, 0x3737f9ce, 0xeded977a, 0xf6f649bf, 0xb4b4992d, 0xd1d1a475, 0x434390d3, 0x48485a12, 0xe2e258ba, 0x979771e6, 0xd2d264b6, 0xc2c270b2,
|
||||
0x2626ad8b, 0xa5a5cd68, 0x5e5ecb95, 0x2929624b, 0x30303c0c, 0x5a5ace94, 0xddddab76, 0xf9f9867f, 0x9595f164, 0xe6e65dbb, 0xc7c735f2, 0x24242d09, 0x1717d1c6, 0xb9b9d66f, 0x1b1bdec5, 0x12129486,
|
||||
0x60607818, 0xc3c330f3, 0xf5f5897c, 0xb3b35cef, 0xe8e8d23a, 0x7373acdf, 0x3535794c, 0x8080a020, 0xe5e59d78, 0xbbbb56ed, 0x7d7d235e, 0xf8f8c63e, 0x5f5f8bd4, 0x2f2fe7c8, 0xe4e4dd39, 0x21216849,
|
||||
}
|
||||
|
||||
var sbox2 = [256]uint32{
|
||||
0x5b8ed55b, 0x42d09242, 0xa74deaa7, 0xfb06fdfb, 0x33fccf33, 0x8765e287, 0xf4c93df4, 0xde6bb5de, 0x584e1658, 0xda6eb4da, 0x50441450, 0x0bcac10b, 0xa08828a0, 0xef17f8ef, 0xb09c2cb0, 0x14110514,
|
||||
0xac872bac, 0x9dfb669d, 0x6af2986a, 0xd9ae77d9, 0xa8822aa8, 0xfa46bcfa, 0x10140410, 0x0fcfc00f, 0xaa02a8aa, 0x11544511, 0x4c5f134c, 0x98be2698, 0x256d4825, 0x1a9e841a, 0x181e0618, 0x66fd9b66,
|
||||
0x72ec9e72, 0x094a4309, 0x41105141, 0xd324f7d3, 0x46d59346, 0xbf53ecbf, 0x62f89a62, 0xe9927be9, 0xccff33cc, 0x51045551, 0x2c270b2c, 0x0d4f420d, 0xb759eeb7, 0x3ff3cc3f, 0xb21caeb2, 0x89ea6389,
|
||||
0x9374e793, 0xce7fb1ce, 0x706c1c70, 0xa60daba6, 0x27edca27, 0x20280820, 0xa348eba3, 0x56c19756, 0x02808202, 0x7fa3dc7f, 0x52c49652, 0xeb12f9eb, 0xd5a174d5, 0x3eb38d3e, 0xfcc33ffc, 0x9a3ea49a,
|
||||
0x1d5b461d, 0x1c1b071c, 0x9e3ba59e, 0xf30cfff3, 0xcf3ff0cf, 0xcdbf72cd, 0x5c4b175c, 0xea52b8ea, 0x0e8f810e, 0x653d5865, 0xf0cc3cf0, 0x647d1964, 0x9b7ee59b, 0x16918716, 0x3d734e3d, 0xa208aaa2,
|
||||
0xa1c869a1, 0xadc76aad, 0x06858306, 0xca7ab0ca, 0xc5b570c5, 0x91f46591, 0x6bb2d96b, 0x2ea7892e, 0xe318fbe3, 0xaf47e8af, 0x3c330f3c, 0x2d674a2d, 0xc1b071c1, 0x590e5759, 0x76e99f76, 0xd4e135d4,
|
||||
0x78661e78, 0x90b42490, 0x38360e38, 0x79265f79, 0x8def628d, 0x61385961, 0x4795d247, 0x8a2aa08a, 0x94b12594, 0x88aa2288, 0xf18c7df1, 0xecd73bec, 0x04050104, 0x84a52184, 0xe19879e1, 0x1e9b851e,
|
||||
0x5384d753, 0x00000000, 0x195e4719, 0x5d0b565d, 0x7ee39d7e, 0x4f9fd04f, 0x9cbb279c, 0x491a5349, 0x317c4d31, 0xd8ee36d8, 0x080a0208, 0x9f7be49f, 0x8220a282, 0x13d4c713, 0x23e8cb23, 0x7ae69c7a,
|
||||
0xab42e9ab, 0xfe43bdfe, 0x2aa2882a, 0x4b9ad14b, 0x01404101, 0x1fdbc41f, 0xe0d838e0, 0xd661b7d6, 0x8e2fa18e, 0xdf2bf4df, 0xcb3af1cb, 0x3bf6cd3b, 0xe71dfae7, 0x85e56085, 0x54411554, 0x8625a386,
|
||||
0x8360e383, 0xba16acba, 0x75295c75, 0x9234a692, 0x6ef7996e, 0xd0e434d0, 0x68721a68, 0x55015455, 0xb619afb6, 0x4edf914e, 0xc8fa32c8, 0xc0f030c0, 0xd721f6d7, 0x32bc8e32, 0xc675b3c6, 0x8f6fe08f,
|
||||
0x74691d74, 0xdb2ef5db, 0x8b6ae18b, 0xb8962eb8, 0x0a8a800a, 0x99fe6799, 0x2be2c92b, 0x81e06181, 0x03c0c303, 0xa48d29a4, 0x8caf238c, 0xae07a9ae, 0x34390d34, 0x4d1f524d, 0x39764f39, 0xbdd36ebd,
|
||||
0x5781d657, 0x6fb7d86f, 0xdceb37dc, 0x15514415, 0x7ba6dd7b, 0xf709fef7, 0x3ab68c3a, 0xbc932fbc, 0x0c0f030c, 0xff03fcff, 0xa9c26ba9, 0xc9ba73c9, 0xb5d96cb5, 0xb1dc6db1, 0x6d375a6d, 0x45155045,
|
||||
0x36b98f36, 0x6c771b6c, 0xbe13adbe, 0x4ada904a, 0xee57b9ee, 0x77a9de77, 0xf24cbef2, 0xfd837efd, 0x44551144, 0x67bdda67, 0x712c5d71, 0x05454005, 0x7c631f7c, 0x40501040, 0x69325b69, 0x63b8db63,
|
||||
0x28220a28, 0x07c5c207, 0xc4f531c4, 0x22a88a22, 0x9631a796, 0x37f9ce37, 0xed977aed, 0xf649bff6, 0xb4992db4, 0xd1a475d1, 0x4390d343, 0x485a1248, 0xe258bae2, 0x9771e697, 0xd264b6d2, 0xc270b2c2,
|
||||
0x26ad8b26, 0xa5cd68a5, 0x5ecb955e, 0x29624b29, 0x303c0c30, 0x5ace945a, 0xddab76dd, 0xf9867ff9, 0x95f16495, 0xe65dbbe6, 0xc735f2c7, 0x242d0924, 0x17d1c617, 0xb9d66fb9, 0x1bdec51b, 0x12948612,
|
||||
0x60781860, 0xc330f3c3, 0xf5897cf5, 0xb35cefb3, 0xe8d23ae8, 0x73acdf73, 0x35794c35, 0x80a02080, 0xe59d78e5, 0xbb56edbb, 0x7d235e7d, 0xf8c63ef8, 0x5f8bd45f, 0x2fe7c82f, 0xe4dd39e4, 0x21684921,
|
||||
}
|
||||
|
||||
var sbox3 = [256]uint32{
|
||||
0x8ed55b5b, 0xd0924242, 0x4deaa7a7, 0x06fdfbfb, 0xfccf3333, 0x65e28787, 0xc93df4f4, 0x6bb5dede, 0x4e165858, 0x6eb4dada, 0x44145050, 0xcac10b0b, 0x8828a0a0, 0x17f8efef, 0x9c2cb0b0, 0x11051414,
|
||||
0x872bacac, 0xfb669d9d, 0xf2986a6a, 0xae77d9d9, 0x822aa8a8, 0x46bcfafa, 0x14041010, 0xcfc00f0f, 0x02a8aaaa, 0x54451111, 0x5f134c4c, 0xbe269898, 0x6d482525, 0x9e841a1a, 0x1e061818, 0xfd9b6666,
|
||||
0xec9e7272, 0x4a430909, 0x10514141, 0x24f7d3d3, 0xd5934646, 0x53ecbfbf, 0xf89a6262, 0x927be9e9, 0xff33cccc, 0x04555151, 0x270b2c2c, 0x4f420d0d, 0x59eeb7b7, 0xf3cc3f3f, 0x1caeb2b2, 0xea638989,
|
||||
0x74e79393, 0x7fb1cece, 0x6c1c7070, 0x0daba6a6, 0xedca2727, 0x28082020, 0x48eba3a3, 0xc1975656, 0x80820202, 0xa3dc7f7f, 0xc4965252, 0x12f9ebeb, 0xa174d5d5, 0xb38d3e3e, 0xc33ffcfc, 0x3ea49a9a,
|
||||
0x5b461d1d, 0x1b071c1c, 0x3ba59e9e, 0x0cfff3f3, 0x3ff0cfcf, 0xbf72cdcd, 0x4b175c5c, 0x52b8eaea, 0x8f810e0e, 0x3d586565, 0xcc3cf0f0, 0x7d196464, 0x7ee59b9b, 0x91871616, 0x734e3d3d, 0x08aaa2a2,
|
||||
0xc869a1a1, 0xc76aadad, 0x85830606, 0x7ab0caca, 0xb570c5c5, 0xf4659191, 0xb2d96b6b, 0xa7892e2e, 0x18fbe3e3, 0x47e8afaf, 0x330f3c3c, 0x674a2d2d, 0xb071c1c1, 0x0e575959, 0xe99f7676, 0xe135d4d4,
|
||||
0x661e7878, 0xb4249090, 0x360e3838, 0x265f7979, 0xef628d8d, 0x38596161, 0x95d24747, 0x2aa08a8a, 0xb1259494, 0xaa228888, 0x8c7df1f1, 0xd73becec, 0x05010404, 0xa5218484, 0x9879e1e1, 0x9b851e1e,
|
||||
0x84d75353, 0x00000000, 0x5e471919, 0x0b565d5d, 0xe39d7e7e, 0x9fd04f4f, 0xbb279c9c, 0x1a534949, 0x7c4d3131, 0xee36d8d8, 0x0a020808, 0x7be49f9f, 0x20a28282, 0xd4c71313, 0xe8cb2323, 0xe69c7a7a,
|
||||
0x42e9abab, 0x43bdfefe, 0xa2882a2a, 0x9ad14b4b, 0x40410101, 0xdbc41f1f, 0xd838e0e0, 0x61b7d6d6, 0x2fa18e8e, 0x2bf4dfdf, 0x3af1cbcb, 0xf6cd3b3b, 0x1dfae7e7, 0xe5608585, 0x41155454, 0x25a38686,
|
||||
0x60e38383, 0x16acbaba, 0x295c7575, 0x34a69292, 0xf7996e6e, 0xe434d0d0, 0x721a6868, 0x01545555, 0x19afb6b6, 0xdf914e4e, 0xfa32c8c8, 0xf030c0c0, 0x21f6d7d7, 0xbc8e3232, 0x75b3c6c6, 0x6fe08f8f,
|
||||
0x691d7474, 0x2ef5dbdb, 0x6ae18b8b, 0x962eb8b8, 0x8a800a0a, 0xfe679999, 0xe2c92b2b, 0xe0618181, 0xc0c30303, 0x8d29a4a4, 0xaf238c8c, 0x07a9aeae, 0x390d3434, 0x1f524d4d, 0x764f3939, 0xd36ebdbd,
|
||||
0x81d65757, 0xb7d86f6f, 0xeb37dcdc, 0x51441515, 0xa6dd7b7b, 0x09fef7f7, 0xb68c3a3a, 0x932fbcbc, 0x0f030c0c, 0x03fcffff, 0xc26ba9a9, 0xba73c9c9, 0xd96cb5b5, 0xdc6db1b1, 0x375a6d6d, 0x15504545,
|
||||
0xb98f3636, 0x771b6c6c, 0x13adbebe, 0xda904a4a, 0x57b9eeee, 0xa9de7777, 0x4cbef2f2, 0x837efdfd, 0x55114444, 0xbdda6767, 0x2c5d7171, 0x45400505, 0x631f7c7c, 0x50104040, 0x325b6969, 0xb8db6363,
|
||||
0x220a2828, 0xc5c20707, 0xf531c4c4, 0xa88a2222, 0x31a79696, 0xf9ce3737, 0x977aeded, 0x49bff6f6, 0x992db4b4, 0xa475d1d1, 0x90d34343, 0x5a124848, 0x58bae2e2, 0x71e69797, 0x64b6d2d2, 0x70b2c2c2,
|
||||
0xad8b2626, 0xcd68a5a5, 0xcb955e5e, 0x624b2929, 0x3c0c3030, 0xce945a5a, 0xab76dddd, 0x867ff9f9, 0xf1649595, 0x5dbbe6e6, 0x35f2c7c7, 0x2d092424, 0xd1c61717, 0xd66fb9b9, 0xdec51b1b, 0x94861212,
|
||||
0x78186060, 0x30f3c3c3, 0x897cf5f5, 0x5cefb3b3, 0xd23ae8e8, 0xacdf7373, 0x794c3535, 0xa0208080, 0x9d78e5e5, 0x56edbbbb, 0x235e7d7d, 0xc63ef8f8, 0x8bd45f5f, 0xe7c82f2f, 0xdd39e4e4, 0x68492121,
|
||||
}
|
||||
|
||||
func rl(x uint32, i uint8) uint32 { return (x << (i % 32)) | (x >> (32 - (i % 32))) }
|
||||
|
||||
func l0(b uint32) uint32 { return b ^ rl(b, 13) ^ rl(b, 23) }
|
||||
|
||||
func feistel0(x0, x1, x2, x3, rk uint32) uint32 { return x0 ^ l0(p(x1^x2^x3^rk)) }
|
||||
|
||||
// nonlinear transformation τ(.)
|
||||
func p(a uint32) uint32 {
|
||||
return (uint32(sbox[a>>24]) << 24) ^ (uint32(sbox[(a>>16)&0xff]) << 16) ^ (uint32(sbox[(a>>8)&0xff]) << 8) ^ uint32(sbox[(a)&0xff])
|
||||
}
|
||||
|
||||
func permuteInitialBlock(b []uint32, block []byte) {
|
||||
for i := 0; i < 4; i++ {
|
||||
b[i] = (uint32(block[i*4]) << 24) | (uint32(block[i*4+1]) << 16) |
|
||||
(uint32(block[i*4+2]) << 8) | (uint32(block[i*4+3]))
|
||||
}
|
||||
}
|
||||
|
||||
func permuteFinalBlock(b []byte, block []uint32) {
|
||||
for i := 0; i < 4; i++ {
|
||||
b[i*4] = uint8(block[i] >> 24)
|
||||
b[i*4+1] = uint8(block[i] >> 16)
|
||||
b[i*4+2] = uint8(block[i] >> 8)
|
||||
b[i*4+3] = uint8(block[i])
|
||||
}
|
||||
}
|
||||
|
||||
// modified core block encryption/decryption function
|
||||
func cryptBlock(subkeys []uint32, b []uint32, r []byte, dst, src []byte, decrypt bool) {
|
||||
var x uint32
|
||||
permuteInitialBlock(b, src)
|
||||
if decrypt {
|
||||
for i := 0; i < 8; i++ {
|
||||
x = b[1] ^ b[2] ^ b[3] ^ subkeys[31-4*i]
|
||||
b[0] = b[0] ^ sbox0[x&0xff] ^ sbox1[(x>>8)&0xff] ^ sbox2[(x>>16)&0xff] ^ sbox3[(x>>24)&0xff]
|
||||
x = b[0] ^ b[2] ^ b[3] ^ subkeys[31-4*i-1]
|
||||
b[1] = b[1] ^ sbox0[x&0xff] ^ sbox1[(x>>8)&0xff] ^ sbox2[(x>>16)&0xff] ^ sbox3[(x>>24)&0xff]
|
||||
x = b[0] ^ b[1] ^ b[3] ^ subkeys[31-4*i-2]
|
||||
b[2] = b[2] ^ sbox0[x&0xff] ^ sbox1[(x>>8)&0xff] ^ sbox2[(x>>16)&0xff] ^ sbox3[(x>>24)&0xff]
|
||||
x = b[1] ^ b[2] ^ b[0] ^ subkeys[31-4*i-3]
|
||||
b[3] = b[3] ^ sbox0[x&0xff] ^ sbox1[(x>>8)&0xff] ^ sbox2[(x>>16)&0xff] ^ sbox3[(x>>24)&0xff]
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 8; i++ {
|
||||
x = b[1] ^ b[2] ^ b[3] ^ subkeys[4*i]
|
||||
b[0] = b[0] ^ sbox0[x&0xff] ^ sbox1[(x>>8)&0xff] ^ sbox2[(x>>16)&0xff] ^ sbox3[(x>>24)&0xff]
|
||||
x = b[0] ^ b[2] ^ b[3] ^ subkeys[4*i+1]
|
||||
b[1] = b[1] ^ sbox0[x&0xff] ^ sbox1[(x>>8)&0xff] ^ sbox2[(x>>16)&0xff] ^ sbox3[(x>>24)&0xff]
|
||||
x = b[0] ^ b[1] ^ b[3] ^ subkeys[4*i+2]
|
||||
b[2] = b[2] ^ sbox0[x&0xff] ^ sbox1[(x>>8)&0xff] ^ sbox2[(x>>16)&0xff] ^ sbox3[(x>>24)&0xff]
|
||||
x = b[1] ^ b[2] ^ b[0] ^ subkeys[4*i+3]
|
||||
b[3] = b[3] ^ sbox0[x&0xff] ^ sbox1[(x>>8)&0xff] ^ sbox2[(x>>16)&0xff] ^ sbox3[(x>>24)&0xff]
|
||||
}
|
||||
}
|
||||
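	// Reverse transformation R: emit the state words in reverse order.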
b[0], b[1], b[2], b[3] = b[3], b[2], b[1], b[0]
|
||||
permuteFinalBlock(r, b)
|
||||
copy(dst, r)
|
||||
}
|
||||
|
||||
func generateSubKeys(key []byte) []uint32 {
|
||||
subkeys := make([]uint32, 32)
|
||||
b := make([]uint32, 4)
|
||||
permuteInitialBlock(b, key)
|
||||
b[0] ^= fk[0]
|
||||
b[1] ^= fk[1]
|
||||
b[2] ^= fk[2]
|
||||
b[3] ^= fk[3]
|
||||
for i := 0; i < 32; i++ {
|
||||
subkeys[i] = feistel0(b[0], b[1], b[2], b[3], ck[i])
|
||||
b[0], b[1], b[2], b[3] = b[1], b[2], b[3], subkeys[i]
|
||||
}
|
||||
return subkeys
|
||||
}
|
||||
|
||||
func EncryptBlock(key SM4Key, dst, src []byte) {
|
||||
subkeys := generateSubKeys(key)
|
||||
cryptBlock(subkeys, make([]uint32, 4), make([]byte, 16), dst, src, false)
|
||||
}
|
||||
|
||||
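// DecryptBlock decrypts a single 16-byte block from src into dst using key.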
func DecryptBlock(key SM4Key, dst, src []byte) {
|
||||
subkeys := generateSubKeys(key)
|
||||
cryptBlock(subkeys, make([]uint32, 4), make([]byte, 16), dst, src, true)
|
||||
}
|
||||
|
||||
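// ReadKeyFromMem parses a PEM-encoded SM4 key from data, decrypting it with pwd when the block is encrypted.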
func ReadKeyFromMem(data []byte, pwd []byte) (SM4Key, error) {
|
||||
block, _ := pem.Decode(data)
if block == nil {
return nil, errors.New("SM4: failed to decode PEM block")
}
if x509.IsEncryptedPEMBlock(block) {
|
||||
if block.Type != "SM4 ENCRYPTED KEY" {
|
||||
return nil, errors.New("SM4: unknown type")
|
||||
}
|
||||
if pwd == nil {
|
||||
return nil, errors.New("SM4: need passwd")
|
||||
}
|
||||
data, err := x509.DecryptPEMBlock(block, pwd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
if block.Type != "SM4 KEY" {
|
||||
return nil, errors.New("SM4: unknown type")
|
||||
}
|
||||
return block.Bytes, nil
|
||||
}
|
||||
|
||||
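// ReadKeyFromPem reads a PEM-encoded SM4 key from the file at FileName; see ReadKeyFromMem.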
func ReadKeyFromPem(FileName string, pwd []byte) (SM4Key, error) {
|
||||
data, err := ioutil.ReadFile(FileName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ReadKeyFromMem(data, pwd)
|
||||
}
|
||||
|
||||
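// WriteKeytoMem encodes key as a PEM block, encrypting it with AES-256 when pwd is non-nil.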
func WriteKeytoMem(key SM4Key, pwd []byte) ([]byte, error) {
|
||||
if pwd != nil {
|
||||
block, err := x509.EncryptPEMBlock(rand.Reader,
|
||||
"SM4 ENCRYPTED KEY", key, pwd, x509.PEMCipherAES256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pem.EncodeToMemory(block), nil
|
||||
} else {
|
||||
block := &pem.Block{
|
||||
Type: "SM4 KEY",
|
||||
Bytes: key,
|
||||
}
|
||||
return pem.EncodeToMemory(block), nil
|
||||
}
|
||||
}
|
||||
|
||||
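// WriteKeyToPem writes key as a PEM block to FileName, encrypting it with AES-256 when pwd is non-nil.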
func WriteKeyToPem(FileName string, key SM4Key, pwd []byte) (bool, error) {
|
||||
var block *pem.Block
|
||||
|
||||
if pwd != nil {
|
||||
var err error
|
||||
block, err = x509.EncryptPEMBlock(rand.Reader,
|
||||
"SM4 ENCRYPTED KEY", key, pwd, x509.PEMCipherAES256)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
block = &pem.Block{
|
||||
Type: "SM4 KEY",
|
||||
Bytes: key,
|
||||
}
|
||||
}
|
||||
file, err := os.Create(FileName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer file.Close()
|
||||
err = pem.Encode(file, block)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (k KeySizeError) Error() string {
|
||||
return "SM4: invalid key size " + strconv.Itoa(int(k))
|
||||
}
|
||||
|
||||
// NewCipher creates and returns a new cipher.Block.
|
||||
func NewCipher(key []byte) (cipher.Block, error) {
|
||||
if len(key) != BlockSize {
|
||||
return nil, KeySizeError(len(key))
|
||||
}
|
||||
c := new(Sm4Cipher)
|
||||
c.subkeys = generateSubKeys(key)
|
||||
c.block1 = make([]uint32, 4)
|
||||
c.block2 = make([]byte, 16)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *Sm4Cipher) BlockSize() int {
|
||||
return BlockSize
|
||||
}
|
||||
|
||||
func (c *Sm4Cipher) Encrypt(dst, src []byte) {
|
||||
cryptBlock(c.subkeys, c.block1, c.block2, dst, src, false)
|
||||
}
|
||||
|
||||
func (c *Sm4Cipher) Decrypt(dst, src []byte) {
|
||||
cryptBlock(c.subkeys, c.block1, c.block2, dst, src, true)
|
||||
}
|
||||
22
vendor/github.com/xtaci/kcp-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Daniel Fu
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
172
vendor/github.com/xtaci/kcp-go/README.md
generated
vendored
Normal file
@ -0,0 +1,172 @@
|
||||
<img src="kcp-go.png" alt="kcp-go" height="50px" />
|
||||
|
||||
|
||||
[![GoDoc][1]][2] [![Powered][9]][10] [![MIT licensed][11]][12] [![Build Status][3]][4] [![Go Report Card][5]][6] [![Coverage Status][7]][8]
|
||||
|
||||
[1]: https://godoc.org/github.com/xtaci/kcp-go?status.svg
|
||||
[2]: https://godoc.org/github.com/xtaci/kcp-go
|
||||
[3]: https://travis-ci.org/xtaci/kcp-go.svg?branch=master
|
||||
[4]: https://travis-ci.org/xtaci/kcp-go
|
||||
[5]: https://goreportcard.com/badge/github.com/xtaci/kcp-go
|
||||
[6]: https://goreportcard.com/report/github.com/xtaci/kcp-go
|
||||
[7]: https://codecov.io/gh/xtaci/kcp-go/branch/master/graph/badge.svg
|
||||
[8]: https://codecov.io/gh/xtaci/kcp-go
|
||||
[9]: https://img.shields.io/badge/KCP-Powered-blue.svg
|
||||
[10]: https://github.com/skywind3000/kcp
|
||||
[11]: https://img.shields.io/badge/license-MIT-blue.svg
|
||||
[12]: LICENSE
|
||||
|
||||
## Introduction
|
||||
|
||||
**kcp-go** is a **Production-Grade Reliable-UDP** library for [golang](https://golang.org/).
|
||||
|
||||
It provides **fast, ordered and error-checked** delivery of streams over **UDP** packets, and has been well tested with the open-source project [kcptun](https://github.com/xtaci/kcptun). Millions of devices (from low-end MIPS routers to high-end servers) are running **kcp-go** at present, in applications such as **online games, live broadcasting, file synchronization and network acceleration**.
|
||||
|
||||
[Latest Release](https://github.com/xtaci/kcp-go/releases)
|
||||
|
||||
## Features
|
||||
|
||||
1. Optimized for **Realtime Online Games, Audio/Video Streaming and Latency-Sensitive Distributed Consensus**.
1. Compatible with [skywind3000's](https://github.com/skywind3000) C version, with language-specific optimizations.
1. **Cache-friendly** and **memory-optimized** design, offering an extremely **high-performance** core.
1. Handles **>5K concurrent connections** on a single commodity server.
1. Compatible with [net.Conn](https://golang.org/pkg/net/#Conn) and [net.Listener](https://golang.org/pkg/net/#Listener), a drop-in replacement for [net.TCPConn](https://golang.org/pkg/net/#TCPConn).
1. [FEC (Forward Error Correction)](https://en.wikipedia.org/wiki/Forward_error_correction) support with [Reed-Solomon Codes](https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction).
1. Packet-level encryption support with [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard), [TEA](https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm), [3DES](https://en.wikipedia.org/wiki/Triple_DES), [Blowfish](https://en.wikipedia.org/wiki/Blowfish_(cipher)), [Cast5](https://en.wikipedia.org/wiki/CAST-128), [Salsa20](https://en.wikipedia.org/wiki/Salsa20), etc., in [CFB](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Feedback_.28CFB.29) mode.
1. A **fixed number of goroutines** is created for the entire server application, minimizing goroutine context switches.
|
||||
|
||||
## Conventions
|
||||
|
||||
Control messages like **SYN/FIN/RST** in TCP **are not defined** in KCP; you need some **keepalive/heartbeat mechanism** at the application level. A real-world practice is to run a **multiplexing** protocol over the session, such as [smux](https://github.com/xtaci/smux) (which has an embedded keepalive mechanism); see [kcptun](https://github.com/xtaci/kcptun) for an example, and the sketch below.
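For illustration, a minimal client-side sketch of layering smux over a kcp-go session (the address, the nil block crypt and the FEC 10/3 parameters are placeholders; a server would mirror this with `smux.Server` and `AcceptStream`):

```go
package main

import (
	"log"

	"github.com/xtaci/kcp-go"
	"github.com/xtaci/smux"
)

func main() {
	// one physical KCP session (no encryption block, FEC 10/3)
	conn, err := kcp.DialWithOptions("192.168.0.1:10000", nil, 10, 3)
	if err != nil {
		log.Fatal(err)
	}

	// multiplex logical streams over it; smux also supplies keepalives
	session, err := smux.Client(conn, nil)
	if err != nil {
		log.Fatal(err)
	}
	stream, err := session.OpenStream()
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close()
	stream.Write([]byte("hello over a multiplexed stream"))
}
```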
|
||||
## Documentation
|
||||
|
||||
For complete documentation, see the associated [Godoc](https://godoc.org/github.com/xtaci/kcp-go).
|
||||
|
||||
## Specification
|
||||
|
||||
<img src="frame.png" alt="Frame Format" height="109px" />
|
||||
|
||||
```
|
||||
+-----------------+
|
||||
| SESSION |
|
||||
+-----------------+
|
||||
| KCP(ARQ) |
|
||||
+-----------------+
|
||||
| FEC(OPTIONAL) |
|
||||
+-----------------+
|
||||
| CRYPTO(OPTIONAL)|
|
||||
+-----------------+
|
||||
| UDP(PACKET) |
|
||||
+-----------------+
|
||||
| IP |
|
||||
+-----------------+
|
||||
| LINK |
|
||||
+-----------------+
|
||||
| PHY |
|
||||
+-----------------+
|
||||
(LAYER MODEL OF KCP-GO)
|
||||
```
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
Client: [full demo](https://github.com/xtaci/kcptun/blob/master/client/main.go)
|
||||
```go
|
||||
kcpconn, err := kcp.DialWithOptions("192.168.0.1:10000", nil, 10, 3)
|
||||
```
|
||||
Server: [full demo](https://github.com/xtaci/kcptun/blob/master/server/main.go)
|
||||
```go
|
||||
lis, err := kcp.ListenWithOptions(":10000", nil, 10, 3)
|
||||
```
|
||||
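Putting the two calls together, a minimal echo round-trip might look like the sketch below (loopback address, nil block crypt and FEC 10/3 are placeholder choices; error handling abbreviated):

```go
package main

import (
	"io"
	"log"

	"github.com/xtaci/kcp-go"
)

func main() {
	// server: accept one connection and echo bytes back
	lis, err := kcp.ListenWithOptions(":10000", nil, 10, 3)
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		conn, err := lis.Accept()
		if err != nil {
			log.Fatal(err)
		}
		buf := make([]byte, 4096)
		for {
			n, err := conn.Read(buf)
			if err != nil {
				return
			}
			conn.Write(buf[:n])
		}
	}()

	// client: dial the listener and send a message
	kcpconn, err := kcp.DialWithOptions("127.0.0.1:10000", nil, 10, 3)
	if err != nil {
		log.Fatal(err)
	}
	kcpconn.Write([]byte("ping"))
	reply := make([]byte, 4)
	if _, err := io.ReadFull(kcpconn, reply); err == nil {
		log.Printf("echoed: %s", reply)
	}
}
```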
|
||||
## Performance
|
||||
```
|
||||
Model Name: MacBook Pro
|
||||
Model Identifier: MacBookPro12,1
|
||||
Processor Name: Intel Core i5
|
||||
Processor Speed: 2.7 GHz
|
||||
Number of Processors: 1
|
||||
Total Number of Cores: 2
|
||||
L2 Cache (per Core): 256 KB
|
||||
L3 Cache: 3 MB
|
||||
Memory: 8 GB
|
||||
```
|
||||
```
|
||||
$ go test -v -run=^$ -bench .
|
||||
beginning tests, encryption:salsa20, fec:10/3
|
||||
BenchmarkAES128-4 200000 8256 ns/op 363.33 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkAES192-4 200000 9153 ns/op 327.74 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkAES256-4 200000 10079 ns/op 297.64 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkTEA-4 100000 18643 ns/op 160.91 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkXOR-4 5000000 316 ns/op 9486.46 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkBlowfish-4 50000 35643 ns/op 84.17 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkNone-4 30000000 56.2 ns/op 53371.83 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkCast5-4 30000 44744 ns/op 67.05 MB/s 0 B/op 0 allocs/op
|
||||
Benchmark3DES-4 2000 639839 ns/op 4.69 MB/s 2 B/op 0 allocs/op
|
||||
BenchmarkTwofish-4 30000 43368 ns/op 69.17 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkXTEA-4 30000 57673 ns/op 52.02 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkSalsa20-4 300000 3917 ns/op 765.80 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkFlush-4 10000000 226 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkEchoSpeed4K-4 5000 300030 ns/op 13.65 MB/s 5672 B/op 177 allocs/op
|
||||
BenchmarkEchoSpeed64K-4 500 3202335 ns/op 20.47 MB/s 73295 B/op 2198 allocs/op
|
||||
BenchmarkEchoSpeed512K-4 50 24926924 ns/op 21.03 MB/s 659339 B/op 17602 allocs/op
|
||||
BenchmarkEchoSpeed1M-4 20 64857821 ns/op 16.17 MB/s 1772437 B/op 42869 allocs/op
|
||||
BenchmarkSinkSpeed4K-4 30000 50230 ns/op 81.54 MB/s 2058 B/op 48 allocs/op
|
||||
BenchmarkSinkSpeed64K-4 2000 648718 ns/op 101.02 MB/s 31165 B/op 687 allocs/op
|
||||
BenchmarkSinkSpeed256K-4 300 4635905 ns/op 113.09 MB/s 286229 B/op 5516 allocs/op
|
||||
BenchmarkSinkSpeed1M-4 200 9566933 ns/op 109.60 MB/s 463771 B/op 10701 allocs/op
|
||||
PASS
|
||||
ok _/Users/xtaci/.godeps/src/github.com/xtaci/kcp-go 39.689s
|
||||
```
|
||||
|
||||
## Design Considerations
|
||||
|
||||
1. slice vs. container/list
|
||||
|
||||
`kcp.flush()` loops through the send queue for retransmission checking every 20ms (the `interval`).
|
||||
|
||||
I've written a benchmark comparing a sequential loop over a *slice* with one over *container/list*, here:
|
||||
|
||||
https://github.com/xtaci/notes/blob/master/golang/benchmark2/cachemiss_test.go
|
||||
|
||||
```
|
||||
BenchmarkLoopSlice-4 2000000000 0.39 ns/op
|
||||
BenchmarkLoopList-4 100000000 54.6 ns/op
|
||||
```
|
||||
|
||||
The list structure introduces **heavy cache misses** compared to the slice, which has better **locality**: with 5000 connections, a 32-packet window and a 20ms interval, each `kcp.flush()` costs roughly 6us/0.03% (CPU) with a slice versus 8.7ms/43.5% (CPU) with a list. A minimal sketch of such a comparison follows.
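For instance, under stated assumptions (a fixed queue length standing in for the send queue; this is the shape of the comparison, not the linked file itself):

```go
package bench

import (
	"container/list"
	"testing"
)

const segs = 1024 // hypothetical send-queue length

func BenchmarkLoopSlice(b *testing.B) {
	q := make([]int, segs)
	sum := 0
	for i := 0; i < b.N; i++ {
		for _, v := range q { // contiguous memory, cache friendly
			sum += v
		}
	}
	_ = sum
}

func BenchmarkLoopList(b *testing.B) {
	q := list.New()
	for i := 0; i < segs; i++ {
		q.PushBack(i)
	}
	sum := 0
	for i := 0; i < b.N; i++ {
		for e := q.Front(); e != nil; e = e.Next() { // pointer chasing, cache hostile
			sum += e.Value.(int)
		}
	}
	_ = sum
}
```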
|
||||
2. Timing accuracy vs. syscall clock_gettime
|
||||
|
||||
Timing is **critical** to the **RTT estimator**: inaccurate timing introduces false retransmissions in KCP. But calling `time.Now()` costs about 42 cycles (10.5ns on a 4GHz CPU, 15.6ns on my 2.7GHz MacBook Pro); here is the benchmark for `time.Now()`:
|
||||
|
||||
https://github.com/xtaci/notes/blob/master/golang/benchmark2/syscall_test.go
|
||||
|
||||
```
|
||||
BenchmarkNow-4 100000000 15.6 ns/op
|
||||
```
|
||||
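That benchmark boils down to a loop like the following sketch:

```go
package bench

import (
	"testing"
	"time"
)

func BenchmarkNow(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = time.Now() // ~15.6 ns/op on the machine described above
	}
}
```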
|
||||
In kcp-go, the current time is refreshed after each `kcp.output()` call returns, and each `kcp.flush()` reads the current time once. Most of the time, 5000 connections cost only 5000 * 15.6ns = 78us (when `kcp.output()` has no packets to send); for 10MB/s of data transfer with a 1400-byte MTU, `kcp.output()` is called around 7500 times per second, spending about 117us on `time.Now()` **every second**.
|
||||
|
||||
|
||||
## Tuning
|
||||
|
||||
Q: I'm handling >5K connections on my server, and the CPU utilization is high.
|
||||
|
||||
A: A standalone `agent` or `gate` server for kcp-go is suggested, not only to spread CPU load but also to preserve the **precision** of RTT measurements, which indirectly affects retransmission. Increasing the update `interval` with `SetNoDelay`, e.g. `conn.SetNoDelay(1, 40, 1, 1)` as shown below, will also dramatically reduce system load.
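For example, right after dialing (the four arguments mirror KCP's `nodelay`, `interval` in milliseconds, `resend` trigger and `nc` congestion-control switch; the larger 40ms interval trades a little latency for much lower CPU usage):

```go
conn, err := kcp.DialWithOptions("192.168.0.1:10000", nil, 10, 3)
if err != nil {
	log.Fatal(err)
}
conn.SetNoDelay(1, 40, 1, 1) // flush every 40ms instead of a more aggressive interval
```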
|
||||
## Who is using this?
|
||||
|
||||
1. https://github.com/xtaci/kcptun -- A Secure Tunnel Based On KCP over UDP.
|
||||
2. https://github.com/getlantern/lantern -- Lantern delivers fast access to the open Internet.
|
||||
3. https://github.com/smallnest/rpcx -- An RPC service framework based on net/rpc, like Alibaba Dubbo and Weibo Motan.
|
||||
4. https://github.com/gonet2/agent -- A gateway for games with stream multiplexing.
|
||||
5. https://github.com/syncthing/syncthing -- Open Source Continuous File Synchronization.
|
||||
6. https://play.google.com/store/apps/details?id=com.k17game.k3 -- Battle Zone - Earth 2048, a world-wide strategy game.
|
||||
|
||||
## Links
|
||||
|
||||
1. https://github.com/xtaci/libkcp -- FEC enhanced KCP session library for iOS/Android in C++
|
||||
2. https://github.com/skywind3000/kcp -- A Fast and Reliable ARQ Protocol
|
||||
3. https://github.com/templexxx/reedsolomon -- Reed-Solomon Erasure Coding in Go
|
||||
288
vendor/github.com/xtaci/kcp-go/crypt.go
generated
vendored
Normal file
@ -0,0 +1,288 @@
|
||||
package kcp
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/des"
|
||||
"crypto/sha1"
|
||||
|
||||
"github.com/templexxx/xor"
|
||||
"github.com/tjfoc/gmsm/sm4"
|
||||
|
||||
"golang.org/x/crypto/blowfish"
|
||||
"golang.org/x/crypto/cast5"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
"golang.org/x/crypto/salsa20"
|
||||
"golang.org/x/crypto/tea"
|
||||
"golang.org/x/crypto/twofish"
|
||||
"golang.org/x/crypto/xtea"
|
||||
)
|
||||
|
||||
var (
|
||||
initialVector = []byte{167, 115, 79, 156, 18, 172, 27, 1, 164, 21, 242, 193, 252, 120, 230, 107}
|
||||
saltxor = `sH3CIVoF#rWLtJo6`
|
||||
)
|
||||
|
||||
// BlockCrypt defines encryption/decryption methods for a given byte slice.
|
||||
// Notes on implementation: the data to be encrypted contains a built-in
// nonce in its first 16 bytes
|
||||
type BlockCrypt interface {
|
||||
// Encrypt encrypts the whole block in src into dst.
|
||||
// Dst and src may point at the same memory.
|
||||
Encrypt(dst, src []byte)
|
||||
|
||||
// Decrypt decrypts the whole block in src into dst.
|
||||
// Dst and src may point at the same memory.
|
||||
Decrypt(dst, src []byte)
|
||||
}
|
||||
|
||||
type salsa20BlockCrypt struct {
|
||||
key [32]byte
|
||||
}
|
||||
|
||||
// NewSalsa20BlockCrypt https://en.wikipedia.org/wiki/Salsa20
|
||||
func NewSalsa20BlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(salsa20BlockCrypt)
|
||||
copy(c.key[:], key)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *salsa20BlockCrypt) Encrypt(dst, src []byte) {
|
||||
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
|
||||
copy(dst[:8], src[:8])
|
||||
}
|
||||
func (c *salsa20BlockCrypt) Decrypt(dst, src []byte) {
|
||||
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
|
||||
copy(dst[:8], src[:8])
|
||||
}
|
||||
|
||||
type sm4BlockCrypt struct {
|
||||
encbuf []byte
|
||||
decbuf []byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewSM4BlockCrypt https://github.com/tjfoc/gmsm/tree/master/sm4
|
||||
func NewSM4BlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(sm4BlockCrypt)
|
||||
block, err := sm4.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
c.encbuf = make([]byte, sm4.BlockSize)
|
||||
c.decbuf = make([]byte, 2*sm4.BlockSize)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *sm4BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
|
||||
func (c *sm4BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
|
||||
|
||||
type twofishBlockCrypt struct {
|
||||
encbuf []byte
|
||||
decbuf []byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewTwofishBlockCrypt https://en.wikipedia.org/wiki/Twofish
|
||||
func NewTwofishBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(twofishBlockCrypt)
|
||||
block, err := twofish.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
c.encbuf = make([]byte, twofish.BlockSize)
|
||||
c.decbuf = make([]byte, 2*twofish.BlockSize)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *twofishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
|
||||
func (c *twofishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
|
||||
|
||||
type tripleDESBlockCrypt struct {
|
||||
encbuf []byte
|
||||
decbuf []byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewTripleDESBlockCrypt https://en.wikipedia.org/wiki/Triple_DES
|
||||
func NewTripleDESBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(tripleDESBlockCrypt)
|
||||
block, err := des.NewTripleDESCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
c.encbuf = make([]byte, des.BlockSize)
|
||||
c.decbuf = make([]byte, 2*des.BlockSize)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *tripleDESBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
|
||||
func (c *tripleDESBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
|
||||
|
||||
type cast5BlockCrypt struct {
|
||||
encbuf []byte
|
||||
decbuf []byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewCast5BlockCrypt https://en.wikipedia.org/wiki/CAST-128
|
||||
func NewCast5BlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(cast5BlockCrypt)
|
||||
block, err := cast5.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
c.encbuf = make([]byte, cast5.BlockSize)
|
||||
c.decbuf = make([]byte, 2*cast5.BlockSize)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *cast5BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
|
||||
func (c *cast5BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
|
||||
|
||||
type blowfishBlockCrypt struct {
|
||||
encbuf []byte
|
||||
decbuf []byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewBlowfishBlockCrypt https://en.wikipedia.org/wiki/Blowfish_(cipher)
|
||||
func NewBlowfishBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(blowfishBlockCrypt)
|
||||
block, err := blowfish.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
c.encbuf = make([]byte, blowfish.BlockSize)
|
||||
c.decbuf = make([]byte, 2*blowfish.BlockSize)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *blowfishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
|
||||
func (c *blowfishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
|
||||
|
||||
type aesBlockCrypt struct {
|
||||
encbuf []byte
|
||||
decbuf []byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewAESBlockCrypt https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
|
||||
func NewAESBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(aesBlockCrypt)
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
c.encbuf = make([]byte, aes.BlockSize)
|
||||
c.decbuf = make([]byte, 2*aes.BlockSize)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *aesBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
|
||||
func (c *aesBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
|
||||
|
||||
type teaBlockCrypt struct {
|
||||
encbuf []byte
|
||||
decbuf []byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewTEABlockCrypt https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
|
||||
func NewTEABlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(teaBlockCrypt)
|
||||
block, err := tea.NewCipherWithRounds(key, 16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
c.encbuf = make([]byte, tea.BlockSize)
|
||||
c.decbuf = make([]byte, 2*tea.BlockSize)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *teaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
|
||||
func (c *teaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
|
||||
|
||||
type xteaBlockCrypt struct {
|
||||
encbuf []byte
|
||||
decbuf []byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewXTEABlockCrypt https://en.wikipedia.org/wiki/XTEA
|
||||
func NewXTEABlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(xteaBlockCrypt)
|
||||
block, err := xtea.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
c.encbuf = make([]byte, xtea.BlockSize)
|
||||
c.decbuf = make([]byte, 2*xtea.BlockSize)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *xteaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
|
||||
func (c *xteaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
|
||||
|
||||
type simpleXORBlockCrypt struct {
|
||||
xortbl []byte
|
||||
}
|
||||
|
||||
// NewSimpleXORBlockCrypt simple xor with key expanding
|
||||
func NewSimpleXORBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(simpleXORBlockCrypt)
|
||||
c.xortbl = pbkdf2.Key(key, []byte(saltxor), 32, mtuLimit, sha1.New)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *simpleXORBlockCrypt) Encrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }
|
||||
func (c *simpleXORBlockCrypt) Decrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }
|
||||
|
||||
type noneBlockCrypt struct{}
|
||||
|
||||
// NewNoneBlockCrypt does nothing but copying
|
||||
func NewNoneBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
return new(noneBlockCrypt), nil
|
||||
}
|
||||
|
||||
func (c *noneBlockCrypt) Encrypt(dst, src []byte) { copy(dst, src) }
|
||||
func (c *noneBlockCrypt) Decrypt(dst, src []byte) { copy(dst, src) }
|
||||
|
||||
// packet encryption with local CFB mode
|
||||
func encrypt(block cipher.Block, dst, src, buf []byte) {
|
||||
blocksize := block.BlockSize()
|
||||
tbl := buf[:blocksize]
|
||||
block.Encrypt(tbl, initialVector)
|
||||
n := len(src) / blocksize
|
||||
base := 0
|
||||
for i := 0; i < n; i++ {
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
}
|
||||
xor.BytesSrc0(dst[base:], src[base:], tbl)
|
||||
}
|
||||
|
||||
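// packet decryption with local CFB mode; the spare half of buf holds the next keystream block so dst may safely alias src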
func decrypt(block cipher.Block, dst, src, buf []byte) {
|
||||
blocksize := block.BlockSize()
|
||||
tbl := buf[:blocksize]
|
||||
next := buf[blocksize:]
|
||||
block.Encrypt(tbl, initialVector)
|
||||
n := len(src) / blocksize
|
||||
base := 0
|
||||
for i := 0; i < n; i++ {
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += blocksize
|
||||
}
|
||||
xor.BytesSrc0(dst[base:], src[base:], tbl)
|
||||
}
|
||||
BIN
vendor/github.com/xtaci/kcp-go/donate.png
generated
vendored
Normal file
|
After Width: | Height: | Size: 4.3 KiB |
303
vendor/github.com/xtaci/kcp-go/fec.go
generated
vendored
Normal file
@ -0,0 +1,303 @@
|
||||
package kcp
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/templexxx/reedsolomon"
|
||||
)
|
||||
|
||||
const (
|
||||
fecHeaderSize = 6
|
||||
fecHeaderSizePlus2 = fecHeaderSize + 2 // plus 2B data size
|
||||
typeData = 0xf1
|
||||
typeFEC = 0xf2
|
||||
)
|
||||
|
||||
type (
|
||||
// fecPacket is a decoded FEC packet
|
||||
fecPacket struct {
|
||||
seqid uint32
|
||||
flag uint16
|
||||
data []byte
|
||||
}
|
||||
|
||||
// fecDecoder for decoding incoming packets
|
||||
fecDecoder struct {
|
||||
rxlimit int // queue size limit
|
||||
dataShards int
|
||||
parityShards int
|
||||
shardSize int
|
||||
rx []fecPacket // ordered receive queue
|
||||
|
||||
// caches
|
||||
decodeCache [][]byte
|
||||
flagCache []bool
|
||||
|
||||
// RS decoder
|
||||
codec reedsolomon.Encoder
|
||||
}
|
||||
)
|
||||
|
||||
func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder {
|
||||
if dataShards <= 0 || parityShards <= 0 {
|
||||
return nil
|
||||
}
|
||||
if rxlimit < dataShards+parityShards {
|
||||
return nil
|
||||
}
|
||||
|
||||
fec := new(fecDecoder)
|
||||
fec.rxlimit = rxlimit
|
||||
fec.dataShards = dataShards
|
||||
fec.parityShards = parityShards
|
||||
fec.shardSize = dataShards + parityShards
|
||||
enc, err := reedsolomon.New(dataShards, parityShards)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
fec.codec = enc
|
||||
fec.decodeCache = make([][]byte, fec.shardSize)
|
||||
fec.flagCache = make([]bool, fec.shardSize)
|
||||
return fec
|
||||
}
|
||||
|
||||
// decodeBytes parses a fecPacket from raw bytes
|
||||
func (dec *fecDecoder) decodeBytes(data []byte) fecPacket {
|
||||
var pkt fecPacket
|
||||
pkt.seqid = binary.LittleEndian.Uint32(data)
|
||||
pkt.flag = binary.LittleEndian.Uint16(data[4:])
|
||||
// allocate memory & copy
|
||||
buf := xmitBuf.Get().([]byte)[:len(data)-6]
|
||||
copy(buf, data[6:])
|
||||
pkt.data = buf
|
||||
return pkt
|
||||
}
|
||||
|
||||
// decode a fec packet
|
||||
func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) {
|
||||
// insertion
|
||||
n := len(dec.rx) - 1
|
||||
insertIdx := 0
|
||||
for i := n; i >= 0; i-- {
|
||||
if pkt.seqid == dec.rx[i].seqid { // de-duplicate
|
||||
xmitBuf.Put(pkt.data)
|
||||
return nil
|
||||
} else if _itimediff(pkt.seqid, dec.rx[i].seqid) > 0 { // insertion
|
||||
insertIdx = i + 1
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// insert into ordered rx queue
|
||||
if insertIdx == n+1 {
|
||||
dec.rx = append(dec.rx, pkt)
|
||||
} else {
|
||||
dec.rx = append(dec.rx, fecPacket{})
|
||||
copy(dec.rx[insertIdx+1:], dec.rx[insertIdx:]) // shift right
|
||||
dec.rx[insertIdx] = pkt
|
||||
}
|
||||
|
||||
// shard range for current packet
|
||||
shardBegin := pkt.seqid - pkt.seqid%uint32(dec.shardSize)
|
||||
shardEnd := shardBegin + uint32(dec.shardSize) - 1
|
||||
|
||||
// max search range in ordered queue for current shard
|
||||
searchBegin := insertIdx - int(pkt.seqid%uint32(dec.shardSize))
|
||||
if searchBegin < 0 {
|
||||
searchBegin = 0
|
||||
}
|
||||
searchEnd := searchBegin + dec.shardSize - 1
|
||||
if searchEnd >= len(dec.rx) {
|
||||
searchEnd = len(dec.rx) - 1
|
||||
}
|
||||
|
||||
// re-construct datashards
|
||||
if searchEnd-searchBegin+1 >= dec.dataShards {
|
||||
var numshard, numDataShard, first, maxlen int
|
||||
|
||||
// zero cache
|
||||
shards := dec.decodeCache
|
||||
shardsflag := dec.flagCache
|
||||
for k := range dec.decodeCache {
|
||||
shards[k] = nil
|
||||
shardsflag[k] = false
|
||||
}
|
||||
|
||||
// shard assembly
|
||||
for i := searchBegin; i <= searchEnd; i++ {
|
||||
seqid := dec.rx[i].seqid
|
||||
if _itimediff(seqid, shardEnd) > 0 {
|
||||
break
|
||||
} else if _itimediff(seqid, shardBegin) >= 0 {
|
||||
shards[seqid%uint32(dec.shardSize)] = dec.rx[i].data
|
||||
shardsflag[seqid%uint32(dec.shardSize)] = true
|
||||
numshard++
|
||||
if dec.rx[i].flag == typeData {
|
||||
numDataShard++
|
||||
}
|
||||
if numshard == 1 {
|
||||
first = i
|
||||
}
|
||||
if len(dec.rx[i].data) > maxlen {
|
||||
maxlen = len(dec.rx[i].data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if numDataShard == dec.dataShards {
|
||||
// case 1: no lost data shards
|
||||
dec.rx = dec.freeRange(first, numshard, dec.rx)
|
||||
} else if numshard >= dec.dataShards {
|
||||
// case 2: data shard lost, but recoverable from parity shard
|
||||
for k := range shards {
|
||||
if shards[k] != nil {
|
||||
dlen := len(shards[k])
|
||||
shards[k] = shards[k][:maxlen]
|
||||
xorBytes(shards[k][dlen:], shards[k][dlen:], shards[k][dlen:])
|
||||
}
|
||||
}
|
||||
if err := dec.codec.ReconstructData(shards); err == nil {
|
||||
for k := range shards[:dec.dataShards] {
|
||||
if !shardsflag[k] {
|
||||
recovered = append(recovered, shards[k])
|
||||
}
|
||||
}
|
||||
}
|
||||
dec.rx = dec.freeRange(first, numshard, dec.rx)
|
||||
}
|
||||
}
|
||||
|
||||
// keep rxlimit
|
||||
if len(dec.rx) > dec.rxlimit {
|
||||
if dec.rx[0].flag == typeData { // record unrecoverable data
|
||||
atomic.AddUint64(&DefaultSnmp.FECShortShards, 1)
|
||||
}
|
||||
dec.rx = dec.freeRange(0, 1, dec.rx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// free a range of fecPacket, and zero for GC recycling
|
||||
func (dec *fecDecoder) freeRange(first, n int, q []fecPacket) []fecPacket {
|
||||
for i := first; i < first+n; i++ { // free
|
||||
xmitBuf.Put(q[i].data)
|
||||
}
|
||||
copy(q[first:], q[first+n:])
|
||||
for i := 0; i < n; i++ { // dereference data
|
||||
q[len(q)-1-i].data = nil
|
||||
}
|
||||
return q[:len(q)-n]
|
||||
}
|
||||
|
||||
type (
|
||||
// fecEncoder for encoding outgoing packets
|
||||
fecEncoder struct {
|
||||
dataShards int
|
||||
parityShards int
|
||||
shardSize int
|
||||
paws uint32 // Protect Against Wrapped Sequence numbers
|
||||
next uint32 // next seqid
|
||||
|
||||
shardCount int // count the number of datashards collected
|
||||
maxSize int // record maximum data length in datashard
|
||||
|
||||
headerOffset int // FEC header offset
|
||||
payloadOffset int // FEC payload offset
|
||||
|
||||
// caches
|
||||
shardCache [][]byte
|
||||
encodeCache [][]byte
|
||||
|
||||
// RS encoder
|
||||
codec reedsolomon.Encoder
|
||||
}
|
||||
)
|
||||
|
||||
func newFECEncoder(dataShards, parityShards, offset int) *fecEncoder {
|
||||
if dataShards <= 0 || parityShards <= 0 {
|
||||
return nil
|
||||
}
|
||||
fec := new(fecEncoder)
|
||||
fec.dataShards = dataShards
|
||||
fec.parityShards = parityShards
|
||||
fec.shardSize = dataShards + parityShards
|
||||
fec.paws = (0xffffffff/uint32(fec.shardSize) - 1) * uint32(fec.shardSize)
|
||||
fec.headerOffset = offset
|
||||
fec.payloadOffset = fec.headerOffset + fecHeaderSize
|
||||
|
||||
enc, err := reedsolomon.New(dataShards, parityShards)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
fec.codec = enc
|
||||
|
||||
// caches
|
||||
fec.encodeCache = make([][]byte, fec.shardSize)
|
||||
fec.shardCache = make([][]byte, fec.shardSize)
|
||||
for k := range fec.shardCache {
|
||||
fec.shardCache[k] = make([]byte, mtuLimit)
|
||||
}
|
||||
return fec
|
||||
}
|
||||
|
||||
// encode the packet, output parity shards if we have enough datashards
|
||||
// the content of returned parityshards will change in next encode
|
||||
func (enc *fecEncoder) encode(b []byte) (ps [][]byte) {
|
||||
enc.markData(b[enc.headerOffset:])
|
||||
binary.LittleEndian.PutUint16(b[enc.payloadOffset:], uint16(len(b[enc.payloadOffset:])))
|
||||
|
||||
// copy data to fec datashards
|
||||
sz := len(b)
|
||||
enc.shardCache[enc.shardCount] = enc.shardCache[enc.shardCount][:sz]
|
||||
copy(enc.shardCache[enc.shardCount], b)
|
||||
enc.shardCount++
|
||||
|
||||
// record max datashard length
|
||||
if sz > enc.maxSize {
|
||||
enc.maxSize = sz
|
||||
}
|
||||
|
||||
// calculate Reed-Solomon Erasure Code
|
||||
if enc.shardCount == enc.dataShards {
|
||||
// bzero each datashard's tail
|
||||
for i := 0; i < enc.dataShards; i++ {
|
||||
shard := enc.shardCache[i]
|
||||
slen := len(shard)
|
||||
xorBytes(shard[slen:enc.maxSize], shard[slen:enc.maxSize], shard[slen:enc.maxSize])
|
||||
}
|
||||
|
||||
// construct equal-sized slice with stripped header
|
||||
cache := enc.encodeCache
|
||||
for k := range cache {
|
||||
cache[k] = enc.shardCache[k][enc.payloadOffset:enc.maxSize]
|
||||
}
|
||||
|
||||
// rs encode
|
||||
if err := enc.codec.Encode(cache); err == nil {
|
||||
ps = enc.shardCache[enc.dataShards:]
|
||||
for k := range ps {
|
||||
enc.markFEC(ps[k][enc.headerOffset:])
|
||||
ps[k] = ps[k][:enc.maxSize]
|
||||
}
|
||||
}
|
||||
|
||||
// reset counters to zero
|
||||
enc.shardCount = 0
|
||||
enc.maxSize = 0
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (enc *fecEncoder) markData(data []byte) {
|
||||
binary.LittleEndian.PutUint32(data, enc.next)
|
||||
binary.LittleEndian.PutUint16(data[4:], typeData)
|
||||
enc.next++
|
||||
}
|
||||
|
||||
func (enc *fecEncoder) markFEC(data []byte) {
|
||||
binary.LittleEndian.PutUint32(data, enc.next)
|
||||
binary.LittleEndian.PutUint16(data[4:], typeFEC)
|
||||
enc.next = (enc.next + 1) % enc.paws
|
||||
}
|
||||