Datasets:
Tasks:
Text Classification
Modalities:
Text
Formats:
parquet
Sub-tasks:
multi-label-classification
Languages:
go
Size:
1K - 10K
License:
Dataset Viewer
code
stringlengths 67
15.9k
| labels
sequencelengths 1
4
|
---|---|
package taglog
// A map type specific to tags. The value type must be either string or []string.
// Users should avoid modifying the map directly and instead use the provided
// functions.
type Tags map[string]interface{}
// Add one or more values to a key.
func (t Tags) Add(key string, value ...string) {
for _, v := range value {
switch vs := t[key].(type) {
case nil:
t[key] = v
case string:
t[key] = []string{vs, v}
case []string:
t[key] = append(vs, v)
}
}
}
// Add one or more values to a key, merging any duplicate values.
func (t Tags) Merge(key string, value ...string) {
for _, v := range value {
current := t.GetAll(key)
found := false
for _, cv := range current {
if v == cv {
found = true
break
}
}
if !found {
t.Add(key, v)
}
}
}
// Append one or more values to a key. This the same as Add() and is only
// provided to couple with Pop() for code clarity.
func (t Tags) Push(key string, value ...string) {
t.Add(key, value...)
}
// Remove the last value for a key
func (t Tags) Pop(key string) {
switch vs := t[key].(type) {
case nil:
return
case string:
delete(t, key)
case []string:
if len(vs) <= 1 {
delete(t, key)
} else if len(vs) == 2 {
t[key] = vs[0]
} else {
t[key] = vs[:len(vs)-1]
}
}
}
// Set one or more values for a key. Any existing values are discarded.
func (t Tags) Set(key string, value ...string) {
delete(t, key)
t.Add(key, value...)
}
// Get the first value for a key. If the key does not exist, an empty string is
// returned.
func (t Tags) Get(key string) string {
switch vs := t[key].(type) {
case string:
return vs
case []string:
return vs[0]
}
return ""
}
// Get all the values for a key. If the key does not exist, a nil slice is
// returned.
func (t Tags) GetAll(key string) []string {
switch vs := t[key].(type) {
case string:
return []string{vs}
case []string:
return vs
}
return nil
}
// Delete a key.
func (t Tags) Del(key string) {
delete(t, key)
}
// Delete all keys.
func (t Tags) DelAll() {
for k, _ := range t {
delete(t, k)
}
}
// Export all tags as a map of string slices.
func (t Tags) Export() map[string][]string {
tags := make(map[string][]string)
for k, v := range t {
switch vs := v.(type) {
case string:
tags[k] = []string{vs}
case []string:
ts := make([]string, len(vs))
copy(ts, vs)
tags[k] = ts
}
}
return tags
}
// Import tags from a map of string slices.
func (t Tags) Import(tags map[string][]string) {
for k, v := range tags {
t.Merge(k, v...)
}
}
// Copy tags. Performs a deep copy of all tag values.
func (t Tags) Copy() Tags {
out := make(Tags)
out.Import(t.Export())
return out
}
| [
5
] |
/******************************************************************************
*
* Copyright 2020 SAP SE
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package keppelv1_test
import (
"net/http"
"testing"
"github.com/sapcc/go-bits/assert"
"github.com/sapcc/keppel/internal/keppel"
"github.com/sapcc/keppel/internal/test"
)
func TestPeersAPI(t *testing.T) {
s := test.NewSetup(t, test.WithKeppelAPI)
h := s.Handler
//check empty response when there are no peers in the DB
assert.HTTPRequest{
Method: "GET",
Path: "/keppel/v1/peers",
Header: map[string]string{"X-Test-Perms": "view:tenant1"},
ExpectStatus: http.StatusOK,
ExpectBody: assert.JSONObject{"peers": []interface{}{}},
}.Check(t, h)
//add some peers
expectedPeers := []assert.JSONObject{
{"hostname": "keppel.example.com"},
{"hostname": "keppel.example.org"},
}
for _, peer := range expectedPeers {
err := s.DB.Insert(&keppel.Peer{HostName: peer["hostname"].(string)})
if err != nil {
t.Fatal(err)
}
}
//check non-empty response
assert.HTTPRequest{
Method: "GET",
Path: "/keppel/v1/peers",
Header: map[string]string{"X-Test-Perms": "view:tenant1"},
ExpectStatus: http.StatusOK,
ExpectBody: assert.JSONObject{"peers": expectedPeers},
}.Check(t, h)
}
| [
3
] |
package router
import (
"github.com/lovego/goa"
)
type fieldCommentPair struct {
Field string
Comment string
}
type ResBodyTpl struct {
Code string `json:"code" c:"ok 表示成功,其他表示错误代码"`
Message string `json:"message" c:"与code对应的描述信息"`
Data interface{} `json:"data"`
}
const (
TypeReqBody uint8 = iota
TypeResBody
TypeErrResBody
)
// TODO
type roundTripBody struct {
Type uint8 // 请求体/成功返回体/错误返回体
Desc string
Body interface{}
}
type routerInfo struct {
Path string
Method string
Title string
Desc string // 描述
ReqContentType string
RegComments []fieldCommentPair
QueryComments []fieldCommentPair
// 保存请求体/成功返回体/错误返回体,数据的数组。并以此顺序生成文档。
RoundTripBodies []roundTripBody
//Req interface{}
//SucRes interface{}
//ErrRes []ResBodyTpl
IsEntry bool // 是否 api 接口
}
type R struct {
Info routerInfo
RouterGroup *goa.RouterGroup
Nodes []*R
}
func NewRoot(r *goa.RouterGroup) *R {
return New(r, ``)
}
func New(r *goa.RouterGroup, path string) *R {
return &R{
Info: routerInfo{
Path: path,
ReqContentType: `application/json`,
RegComments: make([]fieldCommentPair, 0),
QueryComments: make([]fieldCommentPair, 0),
RoundTripBodies: make([]roundTripBody, 0),
},
RouterGroup: r,
Nodes: make([]*R, 0),
}
}
func NewEntry(r *goa.RouterGroup, path string) *R {
entry := New(r, path)
entry.Info.IsEntry = true
return entry
}
func (r *R) Group(path string) *R {
group := r.RouterGroup.Group(path)
child := New(group, path)
r.Nodes = append(r.Nodes, child)
return child
}
func (r *R) GetX(path string, handlerFunc func(*goa.Context)) *R {
child := NewEntry(r.RouterGroup.Get(path, handlerFunc), path)
child.Info.Method = `GET`
r.Nodes = append(r.Nodes, child)
return child
}
func (r *R) Get(path string, handlerFunc func(*goa.Context)) *R {
child := NewEntry(r.RouterGroup.Get(path, handlerFunc), path)
child.Info.Method = `GET`
r.Nodes = append(r.Nodes, child)
return child
}
func (r *R) PostX(path string, handlerFunc func(*goa.Context)) *R {
child := NewEntry(r.RouterGroup.Post(path, handlerFunc), path)
child.Info.Method = `POST`
r.Nodes = append(r.Nodes, child)
return child
}
func (r *R) Post(path string, handlerFunc func(*goa.Context)) *R {
child := NewEntry(r.RouterGroup.Post(path, handlerFunc), path)
child.Info.Method = `POST`
r.Nodes = append(r.Nodes, child)
return child
}
func (r *R) PutX(path string, handlerFunc func(*goa.Context)) *R {
child := NewEntry(r.RouterGroup.Put(path, handlerFunc), path)
child.Info.Method = `PUT`
r.Nodes = append(r.Nodes, child)
return child
}
func (r *R) Put(path string, handlerFunc func(*goa.Context)) *R {
child := NewEntry(r.RouterGroup.Put(path, handlerFunc), path)
child.Info.Method = `PUT`
r.Nodes = append(r.Nodes, child)
return child
}
func (r *R) PatchX(path string, handlerFunc func(*goa.Context)) *R {
child := NewEntry(r.RouterGroup.Patch(path, handlerFunc), path)
child.Info.Method = `PATCH`
r.Nodes = append(r.Nodes, child)
return child
}
func (r *R) Patch(path string, handlerFunc func(*goa.Context)) *R {
child := NewEntry(r.RouterGroup.Patch(path, handlerFunc), path)
child.Info.Method = `PATCH`
r.Nodes = append(r.Nodes, child)
return child
}
func (r *R) DeleteX(path string, handlerFunc func(*goa.Context)) *R {
child := NewEntry(r.RouterGroup.Delete(path, handlerFunc), path)
child.Info.Method = `DELETE`
r.Nodes = append(r.Nodes, child)
return child
}
func (r *R) Delete(path string, handlerFunc func(*goa.Context)) *R {
child := NewEntry(r.RouterGroup.Delete(path, handlerFunc), path)
child.Info.Method = `DELETE`
r.Nodes = append(r.Nodes, child)
return child
}
| [
3
] |
package main
import (
"net/http"
"log"
"io/ioutil"
"encoding/json"
"github.com/gorilla/mux"
"time"
"fmt"
)
const URL = "https://api.opendota.com/api/proplayers"
var allPlayers []Player
var playerMap map[string]Player
func init() {
playerMap = make(map[string]Player)
}
func main() {
channel := make(chan []Player)
playerClient := http.Client{
Timeout: time.Second * 10,
}
go func(channel chan []Player) {
for {
players := <- channel
for _, item := range players {
playerMap[item.Name] = item
}
}
}(channel)
go RefreshPLayersArray(&playerClient, channel)
// start the server since we have initialized all data
router := mux.NewRouter()
router.HandleFunc("/", func(responseWriter http.ResponseWriter, request *http.Request) {
// read from a file
key := request.Header.Get("name")
if player, ok := playerMap[key]; ok {
bytes, _ := json.Marshal(player)
responseWriter.Write([]byte(bytes))
}else{
http.Error(responseWriter, "Not found", 505)
}
})
http.Handle("/", router)
log.Fatal(http.ListenAndServe(":8080", router))
}
func RefreshPLayersArray(client *http.Client, channel chan []Player) {
var playersToReturn []Player
request, err := http.NewRequest(http.MethodGet, URL, nil)
if err != nil {
log.Fatal(err)
}
request.Header.Set("player-name", "name")
response, getErr := client.Do(request)
if getErr != nil {
log.Fatal(getErr)
}
body, readErr := ioutil.ReadAll(response.Body)
if readErr != nil {
log.Fatal(readErr)
}
jsonErr := json.Unmarshal(body, &playersToReturn)
if jsonErr != nil {
log.Fatal(jsonErr)
}
//write to the channel
channel <- playersToReturn
fmt.Println("Refreshing data")
time.Sleep(5 * time.Minute)
go RefreshPLayersArray(client, channel)
}
| [
3
] |
//example with pointer receiver
package main
import (
"fmt"
)
type Person struct {
name string
age int
}
func (p *Person) fn(name1 string) {
p.name = name1
}
func main(){
p1:=&Person{name:"jacob",age:23}
p1.fn("ryan")
fmt.Println(p1.name);
} | [
3
] |
package netmodule
import (
"net"
"sync"
"sync/atomic"
)
//tcpsocket 对net.Conn 的包装
type tcpsocket struct {
conn net.Conn //TCP底层连接
buffers [2]*buffer //双发送缓存
sendIndex uint //发送缓存索引
notify chan int //通知通道
isclose uint32 //指示socket是否关闭
m sync.Mutex //锁
bclose bool //是否关闭
writeIndex uint //插入缓存索引
}
//newtcpsocket 创建一个tcpsocket
func newtcpsocket(c net.Conn) *tcpsocket {
if c == nil {
//c为nil,抛出异常
panic("c is nil")
}
//初始化结构体
var psocket = new(tcpsocket)
psocket.conn = c
psocket.buffers[0] = new(buffer)
psocket.buffers[1] = new(buffer)
psocket.sendIndex = 0
psocket.notify = make(chan int, 1)
psocket.isclose = 0
psocket.bclose = false
psocket.writeIndex = 1
//启动发送协程
go psocket._dosend()
return psocket
}
func (my *tcpsocket) _dosend() {
writeErr := false
for {
_, ok := <-my.notify
if !ok {
return
}
my.m.Lock()
my.writeIndex = my.sendIndex
my.m.Unlock()
my.sendIndex = (my.sendIndex + 1) % 2
if !writeErr {
var sendSplice = my.buffers[my.sendIndex].Data()
for len(sendSplice) > 0 {
n, err := my.conn.Write(sendSplice)
if err != nil {
writeErr = true
break
}
sendSplice = sendSplice[n:]
}
}
my.buffers[my.sendIndex].Clear()
}
}
//Read 读数据
func (my *tcpsocket) Read(b []byte) (n int, err error) {
return my.conn.Read(b)
}
//WriteBytes 写数据
func (my *tcpsocket) Write(b ...[]byte) {
my.m.Lock()
if my.bclose {
my.m.Unlock()
return
}
dataLen := my.buffers[my.writeIndex].Len()
writeLen := 0
for i := 0; i < len(b); i++ {
writeLen += len(b[i])
my.buffers[my.writeIndex].Append(b[i])
}
if dataLen == 0 && writeLen != 0 {
my.notify <- 0
}
my.m.Unlock()
}
//Close 关闭一个tcpsocket, 释放系统资源
func (my *tcpsocket) Close() {
my.m.Lock()
if my.bclose {
my.m.Unlock()
return
}
my.bclose = true
my.conn.Close()
close(my.notify)
my.m.Unlock()
atomic.StoreUint32(&(my.isclose), 1)
}
//IsClose 判断tcpsocket是否关闭
func (my *tcpsocket) IsClose() bool {
val := atomic.LoadUint32(&(my.isclose))
if val > 0 {
return true
}
return false
}
| [
3
] |
package parser
import (
"github.com/almostmoore/kadastr/feature"
"github.com/almostmoore/kadastr/rapi"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"log"
"strconv"
"sync"
"time"
)
type FeatureParser struct {
session *mgo.Session
fRepo feature.FeatureRepository
rClient *rapi.Client
}
func NewFeatureParser(session *mgo.Session) FeatureParser {
return FeatureParser{
session: session,
fRepo: feature.NewFeatureRepository(session),
rClient: rapi.NewClient(),
}
}
// Run function starts parsing
func (f *FeatureParser) Run(quarter string, streams int64) {
var maxUnit int64 = 10000
done := make(chan bool, streams)
errors := make(chan bool, streams)
items := make(chan int64, maxUnit)
defer close(done)
defer close(errors)
defer close(items)
wg := &sync.WaitGroup{}
var i int64
for i = 0; i < streams; i++ {
wg.Add(1)
go f.parse(quarter, items, errors, done, wg)
}
go f.checkError(errors, done, streams)
go func() {
for i = 0; i < maxUnit; i++ {
items <- i
}
}()
wg.Wait()
}
func (f *FeatureParser) checkError(errors chan bool, done chan bool, streams int64) {
errCount := 0
for has := range errors {
if has {
errCount += 1
} else {
errCount = 0
}
if errCount == 200 {
var i int64
for i = 0; i < streams; i++ {
done <- true
}
}
}
}
// parse data from rosreestr
func (f *FeatureParser) parse(quarter string, items <-chan int64, errors, done chan bool, wg *sync.WaitGroup) {
for {
select {
case i := <-items:
result := f.parseItem(quarter, i)
errors <- !result
case <-done:
wg.Done()
return
default:
}
}
}
// parseItem Parse item for quarter
func (f *FeatureParser) parseItem(quarter string, item int64) bool {
time.Sleep(5 * time.Second)
number := quarter + ":" + strconv.FormatInt(item, 10)
log.Printf("Парсинг участка %s\n", number)
ft, err := f.rClient.GetFeature(number)
if err != nil || ft.CadNumber == "" {
log.Printf("Участок не найден %s (%s)\n", number, err)
return false
}
_, err = f.fRepo.FindByCadNumber(ft.CadNumber)
if err == nil {
log.Printf("Участок %s уже присутствует в базе данных. Пропускаем\n", ft.CadNumber)
return true
}
ft.ID = bson.NewObjectId()
err = f.fRepo.Insert(ft)
if err != nil {
log.Println(err)
} else {
log.Printf("Участок сохранен %s\n", number)
}
return true
}
| [
6
] |
package rsa
import (
"CryptCode/utils"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"flag"
"fmt"
"io/ioutil"
"os"
)
const RSA_PRIVATE = "RSA PRIVATE KEY"
const RSA_PUBLIC = "RSA PUBLIC KEY"
/**
* 私钥:
* 公钥:
* 汉森堡
*/
func CreatePairKeys() (*rsa.PrivateKey,error) {
//1、先生成私钥
//var bits int
//flag.IntVar(&bits, "b", 1024, "密钥长度")
////fmt.Println(bits)
//privateKey, err := rsa.GenerateKey(rand.Reader, bits)
//if err != nil {
// return nil, err
//}
//
////2、根据私钥生成公钥
//publicKey := privateKey.Public()
//fmt.Println(publicKey)
////3、将私钥和公钥进行返回
//return privateKey, nil
//1.生成私钥
var bits int
flag.IntVar(&bits,"b",1024,"密钥长度")
pri,err:=rsa.GenerateKey(rand.Reader,bits)
if err!=nil {
return nil,err
}
return pri,nil
//2.根据私钥生成公钥
publicKeY:=pri.Public()
fmt.Println(publicKeY)
//3.返回私钥公
return pri,nil
}
//---------------------关于pem证书文件的生成和读取------------------------
/**
* 根据用户传入的内容,自动创建公私钥,并生成相应格式的证书文件
*/
func GenerateKeys(file_name string) error {
//1、生成私钥
//pri, err := CreatePairKeys()
//if err != nil {
// return err
//}
////2.创建私钥文件
//err = generatePriFileByPrivateKey(pri, file_name)
//if err != nil {
// return err
//}
////3、公钥文件
//err = generatePubFileByPubKey(pri.PublicKey, file_name)
//if err != nil {
// return err
//}
//return nil
pri,err:=CreatePairKeys()
if err!=nil {
return nil
}
err=generatePriFileByPrivateKey(pri,file_name)
if err!=nil {
return nil
}
err =generatePubFileByPubKey(pri.PublicKey,file_name)
if err!=nil {
return err
}
return nil
}
/**
* 读取pem文件格式的私钥数据
*/
func ReadPemPriKey(file_name string) (*rsa.PrivateKey, error) {
//blockBytes, err := ioutil.ReadFile(file_name)
// //if err != nil {
// // return nil, err
// //}
// ////pem.decode:将byte数据解码为内存中的实例对象
// //block, _ := pem.Decode(blockBytes)
// //
// //priBytes := block.Bytes
// //priKey, err := x509.ParsePKCS1PrivateKey(priBytes)
// //return priKey, err
blockBytes,err:=ioutil.ReadFile(file_name)
if err!=nil {
return nil,err
}
block,_:=pem.Decode(blockBytes)
priBytes:=block.Bytes
pri,err:=x509.ParsePKCS1PrivateKey(priBytes)
return pri,err
}
/**
* 读取pem文件格式的公钥数据
*/
func ReadPemPubKey(file_name string) (*rsa.PublicKey, error) {
//blockBytes, err := ioutil.ReadFile(file_name)
//if err != nil {
// return nil, err
//}
//block, _ := pem.Decode(blockBytes)
//pubKey, err := x509.ParsePKCS1PublicKey(block.Bytes)
//return pubKey, err
blockBytes,err:=ioutil.ReadFile(file_name)
if err!=nil {
return nil,err
}
block,_:=pem.Decode(blockBytes)
pub,err:=x509.ParsePKCS1PublicKey(block.Bytes)
if err!=nil {
return nil,err
}
return pub,nil
}
/**
* 根据给定的私钥数据,生成对应的pem文件
*/
func generatePriFileByPrivateKey(pri *rsa.PrivateKey, file_name string) (error) {
//根据PKCS1规则,序列化后的私钥
//priStream := x509.MarshalPKCS1PrivateKey(pri)
//
////pem文件,此时,privateFile文件为空
//privatFile, err := os.Create("rsa_pri_" + file_name + ".pem") //存私钥的生成的文件
//if err != nil {
// return err
//}
//
////pem文件中的格式 结构体
//block := &pem.Block{
// Type: RSA_PRIVATE,
// Bytes: priStream,
//}
//
////将准备好的格式内容写入到pem文件中
//err = pem.Encode(privatFile, block)
//if err != nil {
// return err
//}
//return nil
priSteam:=x509.MarshalPKCS1PrivateKey(pri)
privatFile,err:=os.Create("rsa_pri_"+file_name+".pem")
if err!=nil {
return err
}
block:=&pem.Block{
Type:RSA_PRIVATE,
Bytes:priSteam,
}
err=pem.Encode(privatFile,block)
if err!=nil {
return err
}
return nil
}
/**
* 根据公钥生成对应的pem文件,持久化存储
*/
func generatePubFileByPubKey(pub rsa.PublicKey, file_name string) error {
//stream := x509.MarshalPKCS1PublicKey(&pub)
//
//block := pem.Block{
// Type: RSA_PUBLIC,
// Bytes: stream,
//}
//
//pubFile, err := os.Create("rsa_pub_" + file_name + ".pem")
//if err != nil {
// return err
//}
//return pem.Encode(pubFile, &block)
pubStream:=x509.MarshalPKCS1PublicKey(&pub)
block:=pem.Block{
Type: RSA_PUBLIC,
Bytes: pubStream,
}
pubFile,err:=os.Create("rsa_pub_"+file_name+".pem")
if err!=nil {
return err
}
return pem.Encode(pubFile,&block)
}
//=========================第一种组合:公钥加密,私钥解密==============================//
/**
* 使用RSA算法对数据进行加密,返回加密后的密文
*/
func RSAEncrypt(key rsa.PublicKey, data []byte) ([]byte, error) {
return rsa.EncryptPKCS1v15(rand.Reader,&key,data)
//return rsa.EncryptPKCS1v15(rand.Reader, &key, data)
}
/**
* 使用RSA算法对密文数据进行解密,返回解密后的明文
*/
func RSADecrypt(private *rsa.PrivateKey, cipher []byte) ([]byte, error) {
return rsa.DecryptPKCS1v15(rand.Reader,private,cipher)
//return rsa.DecryptPKCS1v15(rand.Reader, private, cipher)
}
//=========================第二种组合:私钥签名,公钥验签==============================//
/**
* 使用RSA算法对数据进行数字签名,并返回签名信息
*/
func RSASign(private *rsa.PrivateKey, data []byte) ([]byte, error) {
hashed:=utils.Md5Hash(data)
rsa.SignPKCS1v15(rand.Reader,private,crypto.MD5,hashed)
//hashed := utils.Md5Hash(data)
//return rsa.SignPKCS1v15(rand.Reader, private, crypto.MD5, hashed)
}
/**
* 使用RSA算法对数据进行签名验证,并返回签名验证的结果
* 验证通过,返回true
* 验证不通过,返回false, 同时error中有错误信息
*/
func RSAVerify(pub rsa.PublicKey, data []byte, signText []byte) (bool, error) {
hashed := utils.Md5Hash(data)
err := rsa.VerifyPKCS1v15(&pub, crypto.MD5, hashed, signText)
if err!=nil {
return false,err
}
return true,nil
}
| [
3,
6
] |
package Routes
import (
"freshers-bootcamp/week1/day4/Controllers"
"github.com/gin-gonic/gin"
)
//SetupRouter ... Configure routes
func SetupRouter() *gin.Engine {
r := gin.Default()
grp1 := r.Group("/user-api")
{
grp1.GET("products", Controllers.GetUsers)
grp1.POST("product", Controllers.CreateProd)
grp1.GET("product/:id", Controllers.GetProdByID)
grp1.PATCH("product/:id", Controllers.UpdateProd)
grp1.DELETE("product/:id", Controllers.DeleteUser)
grp1.POST("order", Controllers.CreateOrder)
}
return r
} | [
3
] |
package main
import (
"bufio"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
)
type testCase struct {
rows int
cols int
grid [][]int
}
type testCaseOrErr struct {
testCase
err error
}
func main() {
reader := bufio.NewReader(os.Stdin)
testCases := loadTestCasesToChannel(reader)
var testIx int
for test := range testCases {
testIx++
if test.err != nil {
log.Fatal(test.err)
}
numAdditions := makeRabbitHouseSafe(&test.testCase)
fmt.Printf("Case #%d: %d\n", testIx, numAdditions)
}
}
func makeRabbitHouseSafe(house *testCase) (totalHeightIncrease int) {
buckets := newHeightBuckets(&house.grid)
for {
if buckets.maxHeight == 0 {
break
}
totalHeightIncrease += secureNextLocation(buckets, house)
}
return
}
func secureNextLocation(buckets *heightBuckets, house *testCase) (addedHeight int) {
loc := buckets.getLocationAtMaxHeight()
locHeight := getLocationHeight(loc, house)
defer buckets.removeLocation(locHeight, loc)
for _, neighbor := range getNeighborLocations(loc, house) {
neighborHeight := getLocationHeight(neighbor, house)
heightDiff := locHeight - neighborHeight
if heightDiff > 1 {
addedHeight += heightDiff - 1
buckets.insertLocation(locHeight-1, neighbor)
setLocationHeight(locHeight-1, neighbor, house)
buckets.removeLocation(neighborHeight, neighbor)
}
}
return
}
func setLocationHeight(height int, loc location, house *testCase) {
house.grid[loc.row][loc.col] = height
}
func getLocationHeight(loc location, house *testCase) (height int) {
return house.grid[loc.row][loc.col]
}
func getNeighborLocations(loc location, house *testCase) (neighbors []location) {
if loc.row > 0 {
neighbors = append(neighbors, location{loc.row - 1, loc.col})
}
if loc.col < house.cols-1 {
neighbors = append(neighbors, location{loc.row, loc.col + 1})
}
if loc.row < house.rows-1 {
neighbors = append(neighbors, location{loc.row + 1, loc.col})
}
if loc.col > 0 {
neighbors = append(neighbors, location{loc.row, loc.col - 1})
}
return
}
type location struct {
row, col int
}
type heightBuckets struct {
buckets map[int]map[location]struct{}
maxHeight int
}
func (b *heightBuckets) getLocationAtMaxHeight() location {
loc, err := b.getLocationAtHeight(b.maxHeight)
if err != nil {
log.Fatal(err)
}
return loc
}
func (b *heightBuckets) getLocationAtHeight(height int) (loc location, err error) {
for loc = range b.buckets[height] {
return loc, err
}
return loc, fmt.Errorf("no location found at height: %d", height)
}
func (b *heightBuckets) insertLocation(height int, loc location) {
if _, ok := b.buckets[height]; !ok {
b.buckets[height] = map[location]struct{}{}
}
b.buckets[height][loc] = struct{}{}
if height > b.maxHeight {
b.maxHeight = height
}
}
func (b *heightBuckets) removeLocation(height int, loc location) {
delete(b.buckets[height], loc)
if len(b.buckets[height]) == 0 {
delete(b.buckets, height)
}
if height == b.maxHeight {
b.decreaseMaxHeight()
}
}
func (b *heightBuckets) decreaseMaxHeight() {
if len(b.buckets) == 0 {
b.maxHeight = 0
return
}
for {
if _, ok := b.buckets[b.maxHeight]; !ok {
b.maxHeight--
} else {
break
}
}
}
func newHeightBuckets(grid *[][]int) *heightBuckets {
ret := heightBuckets{
buckets: make(map[int]map[location]struct{}),
maxHeight: 0,
}
for rowIx, row := range *grid {
for colIx, height := range row {
ret.insertLocation(height, location{rowIx, colIx})
}
}
return &ret
}
// -------- Input reading -------- //
func newTestCase(rows, cols int, heights [][]int) testCase {
return testCase{
rows,
cols,
heights,
}
}
func newTestCaseOrErr(rows, cols int, grid [][]int, err error) testCaseOrErr {
return testCaseOrErr{
newTestCase(rows, cols, grid),
err,
}
}
func parseIntFields(line string) (ints []int, err error) {
for _, field := range strings.Fields(line) {
convField, err := strconv.Atoi(field)
if err != nil {
return []int{}, err
}
ints = append(ints, convField)
}
return
}
func parseIntsFromNextLine(reader *bufio.Reader) (ints []int, err error) {
line, err := reader.ReadString('\n')
if err != nil && err != io.EOF {
return
}
return parseIntFields(line)
}
func parseRowAndColNum(reader *bufio.Reader) (row, col int, err error) {
intFields, err := parseIntsFromNextLine(reader)
if err != nil {
return
}
if len(intFields) != 2 {
err = fmt.Errorf("number of int fields in first line of test case not equal to 2")
return
}
row = intFields[0]
col = intFields[1]
return
}
func parseNumTestCases(reader *bufio.Reader) (numTestCases int, err error) {
firstLineInts, err := parseIntsFromNextLine(reader)
if err != nil {
return
}
if len(firstLineInts) != 1 {
err = fmt.Errorf("unexpected number of ints in test case number definition")
return
}
numTestCases = firstLineInts[0]
return
}
func parseGrid(rows int, cols int, reader *bufio.Reader) ([][]int, error) {
grid := make([][]int, rows)
for i := 0; i < rows; i++ {
row, err := parseIntsFromNextLine(reader)
if err != nil {
return grid, err
}
grid[i] = row
}
return grid, nil
}
func loadTestCasesToChannel(reader *bufio.Reader) <-chan testCaseOrErr {
out := make(chan testCaseOrErr)
go func() {
defer close(out)
numberOfTestCases, err := parseNumTestCases(reader)
if err != nil {
out <- testCaseOrErr{err: err}
return
}
for i := 0; i < numberOfTestCases; i++ {
rows, cols, err := parseRowAndColNum(reader)
if err != nil {
out <- testCaseOrErr{err: err}
return
}
grid, err := parseGrid(rows, cols, reader)
out <- newTestCaseOrErr(rows, cols, grid, err)
}
}()
return out
}
| [
6
] |
package utils
import (
"bytes"
"crypto/rand"
"encoding/json"
"fmt"
"log"
"math"
"math/big"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/parnurzeal/gorequest"
"golang.org/x/xerrors"
pb "gopkg.in/cheggaaa/pb.v1"
)
var vulnListDir = filepath.Join(CacheDir(), "vuln-list")
func CacheDir() string {
cacheDir, err := os.UserCacheDir()
if err != nil {
cacheDir = os.TempDir()
}
dir := filepath.Join(cacheDir, "vuln-list-update")
return dir
}
func SetVulnListDir(dir string) {
vulnListDir = dir
}
func VulnListDir() string {
return vulnListDir
}
func SaveCVEPerYear(dirPath string, cveID string, data interface{}) error {
s := strings.Split(cveID, "-")
if len(s) != 3 {
return xerrors.Errorf("invalid CVE-ID format: %s\n", cveID)
}
yearDir := filepath.Join(dirPath, s[1])
if err := os.MkdirAll(yearDir, os.ModePerm); err != nil {
return err
}
filePath := filepath.Join(yearDir, fmt.Sprintf("%s.json", cveID))
if err := Write(filePath, data); err != nil {
return xerrors.Errorf("failed to write file: %w", err)
}
return nil
}
func Write(filePath string, data interface{}) error {
dir := filepath.Dir(filePath)
if err := os.MkdirAll(dir, os.ModePerm); err != nil {
return xerrors.Errorf("failed to create %s: %w", dir, err)
}
f, err := os.Create(filePath)
if err != nil {
return xerrors.Errorf("file create error: %w", err)
}
defer f.Close()
b, err := json.MarshalIndent(data, "", " ")
if err != nil {
return xerrors.Errorf("JSON marshal error: %w", err)
}
_, err = f.Write(b)
if err != nil {
return xerrors.Errorf("file write error: %w", err)
}
return nil
}
// GenWorkers generate workders
func GenWorkers(num, wait int) chan<- func() {
tasks := make(chan func())
for i := 0; i < num; i++ {
go func() {
for f := range tasks {
f()
time.Sleep(time.Duration(wait) * time.Second)
}
}()
}
return tasks
}
// DeleteNil deletes nil in errs
func DeleteNil(errs []error) (new []error) {
for _, err := range errs {
if err != nil {
new = append(new, err)
}
}
return new
}
// TrimSpaceNewline deletes space character and newline character(CR/LF)
func TrimSpaceNewline(str string) string {
str = strings.TrimSpace(str)
return strings.Trim(str, "\r\n")
}
// FetchURL returns HTTP response body with retry
func FetchURL(url, apikey string, retry int) (res []byte, err error) {
for i := 0; i <= retry; i++ {
if i > 0 {
wait := math.Pow(float64(i), 2) + float64(RandInt()%10)
log.Printf("retry after %f seconds\n", wait)
time.Sleep(time.Duration(time.Duration(wait) * time.Second))
}
res, err = fetchURL(url, map[string]string{"api-key": apikey})
if err == nil {
return res, nil
}
}
return nil, xerrors.Errorf("failed to fetch URL: %w", err)
}
func RandInt() int {
seed, _ := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
return int(seed.Int64())
}
func fetchURL(url string, headers map[string]string) ([]byte, error) {
req := gorequest.New().Get(url)
for key, value := range headers {
req.Header.Add(key, value)
}
resp, body, errs := req.Type("text").EndBytes()
if len(errs) > 0 {
return nil, xerrors.Errorf("HTTP error. url: %s, err: %w", url, errs[0])
}
if resp.StatusCode != 200 {
return nil, xerrors.Errorf("HTTP error. status code: %d, url: %s", resp.StatusCode, url)
}
return body, nil
}
// FetchConcurrently fetches concurrently
func FetchConcurrently(urls []string, concurrency, wait, retry int) (responses [][]byte, err error) {
reqChan := make(chan string, len(urls))
resChan := make(chan []byte, len(urls))
errChan := make(chan error, len(urls))
defer close(reqChan)
defer close(resChan)
defer close(errChan)
go func() {
for _, url := range urls {
reqChan <- url
}
}()
bar := pb.StartNew(len(urls))
tasks := GenWorkers(concurrency, wait)
for range urls {
tasks <- func() {
url := <-reqChan
res, err := FetchURL(url, "", retry)
if err != nil {
errChan <- err
return
}
resChan <- res
}
bar.Increment()
}
bar.Finish()
var errs []error
timeout := time.After(10 * 60 * time.Second)
for range urls {
select {
case res := <-resChan:
responses = append(responses, res)
case err := <-errChan:
errs = append(errs, err)
case <-timeout:
return nil, xerrors.New("Timeout Fetching URL")
}
}
if 0 < len(errs) {
return responses, fmt.Errorf("%s", errs)
}
return responses, nil
}
// Major returns major version
func Major(osVer string) (majorVersion string) {
return strings.Split(osVer, ".")[0]
}
func IsCommandAvailable(name string) bool {
cmd := exec.Command(name, "--help")
if err := cmd.Run(); err != nil {
return false
}
return true
}
func Exists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return true, err
}
func Exec(command string, args []string) (string, error) {
cmd := exec.Command(command, args...)
var stdoutBuf, stderrBuf bytes.Buffer
cmd.Stdout = &stdoutBuf
cmd.Stderr = &stderrBuf
if err := cmd.Run(); err != nil {
log.Println(stderrBuf.String())
return "", xerrors.Errorf("failed to exec: %w", err)
}
return stdoutBuf.String(), nil
}
func LookupEnv(key, defaultValue string) string {
if val, ok := os.LookupEnv(key); ok {
return val
}
return defaultValue
}
| [
1,
6
] |
package utils
const (
FloatType = CounterType("float64")
UintType = CounterType("uint64")
)
type CounterType string
type Counter interface {
Add(interface{})
Clone() Counter
Value() interface{}
}
type IntCounter uint64
func (ic *IntCounter) Add(num interface{}) {
switch n := num.(type) {
case uint64:
*ic = *ic + IntCounter(n)
case IntCounter:
*ic = *ic + n
case int:
*ic = *ic + IntCounter(n)
case float64:
*ic = *ic + IntCounter(n)
default:
}
}
func (ic *IntCounter) Clone() Counter {
counter := new(IntCounter)
counter.Add(*ic)
return counter
}
func (ic *IntCounter) Value() interface{} {
return uint64(*ic)
}
type NopCounter struct{}
func (nc *NopCounter) Add(num interface{}) {
}
func (nc *NopCounter) Clone() Counter {
return &NopCounter{}
}
func (nc *NopCounter) Value() interface{} { return &NopCounter{} }
| [
0
] |
package client
import (
"encoding/binary"
"encoding/json"
"errors"
protocol "github.com/sniperHW/flyfish/proto"
"reflect"
"unsafe"
)
const CompressSize = 16 * 1024 //对超过这个大小的blob字段执行压缩
type Field protocol.Field
func (this *Field) IsNil() bool {
return (*protocol.Field)(this).IsNil()
}
func (this *Field) GetString() string {
return (*protocol.Field)(this).GetString()
}
func (this *Field) GetInt() int64 {
return (*protocol.Field)(this).GetInt()
}
func (this *Field) GetFloat() float64 {
return (*protocol.Field)(this).GetFloat()
}
func (this *Field) GetBlob() []byte {
return (*protocol.Field)(this).GetBlob()
}
func (this *Field) GetValue() interface{} {
return (*protocol.Field)(this).GetValue()
}
func UnmarshalJsonField(field *Field, obj interface{}) error {
if field == nil {
return nil
} else {
v := field.GetValue()
switch v.(type) {
case string, []byte:
var b []byte
switch v.(type) {
case []byte:
b = v.([]byte)
case string:
s := v.(string)
b = *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Len: int(len(s)),
Cap: int(len(s)),
Data: (*reflect.StringHeader)(unsafe.Pointer(&s)).Data,
}))
}
if len(b) == 0 {
return nil
} else {
return json.Unmarshal(b, obj)
}
default:
return nil
}
}
}
//对大小>=1k的[]byte字段,执行压缩
func PackField(key string, v interface{}) *protocol.Field {
switch v.(type) {
case []byte:
b := v.([]byte)
var bb []byte
if len(b) >= CompressSize {
bb, _ = getCompressor().Compress(b)
size := make([]byte, 4)
binary.BigEndian.PutUint32(size, uint32(len(bb)+4))
bb = append(bb, size...)
} else {
bb = b
}
return protocol.PackField(key, bb)
default:
return protocol.PackField(key, v)
}
}
func UnpackField(f *protocol.Field) (*Field, error) {
var err error
if nil != f {
switch f.GetValue().(type) {
case []byte:
b := f.GetBlob()
if ok, size := checkHeader(b); ok {
if len(b) >= size+4 {
if size = int(binary.BigEndian.Uint32(b[len(b)-4:])); size == len(b) {
if b, err = getDecompressor().Decompress(b[:len(b)-4]); nil == err {
return (*Field)(protocol.PackField(f.Name, b)), err
}
} else {
err = errors.New("flyfish client unpackField:invaild filed1")
}
} else {
err = errors.New("flyfish client unpackField:invaild filed2")
}
if nil != err {
return (*Field)(protocol.PackField(f.Name, []byte{})), err
}
}
}
}
return (*Field)(f), err
}
| [
3,
7
] |
package werckerclient
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"github.com/jtacoma/uritemplates"
)
// NewClient creates a new Client. It merges the default Config together with
// config.
func NewClient(config *Config) *Client {
c := &Client{config: defaultConfig.Merge(config)}
return c
}
// Client is the wercker api client.
type Client struct {
config *Config
}
// Do makes a request to the wercker api servers.
func (c *Client) Do(method string, urlTemplate *uritemplates.UriTemplate, urlModel interface{}, payload interface{}, result interface{}) error {
body, err := c.DoRaw(method, urlTemplate, urlModel, payload)
if err != nil {
return err
}
if len(body) > 0 {
err = json.Unmarshal(body, result)
if err != nil {
return err
}
}
return nil
}
// DoRaw makes a full request but returns the result as a byte array
func (c *Client) DoRaw(method string, urlTemplate *uritemplates.UriTemplate, urlModel interface{}, payload interface{}) ([]byte, error) {
path, err := expandURL(urlTemplate, urlModel)
if err != nil {
return nil, err
}
var payloadReader io.Reader
if payload != nil {
b, err := json.Marshal(payload)
if err != nil {
return nil, err
}
payloadReader = bytes.NewReader(b)
}
return c.makeRequest(method, path, payloadReader)
}
func (c *Client) generateURL(path string) string {
endpoint := strings.TrimRight(c.config.Endpoint, "/")
return endpoint + path
}
// MakeRequest makes a request to the wercker API, and returns the returned
// payload
func (c *Client) makeRequest(method string, path string, payload io.Reader) ([]byte, error) {
url := c.generateURL(path)
req, err := http.NewRequest(method, url, payload)
if err != nil {
return nil, err
}
if c.config.Credentials != nil {
// Add credentials
creds, err := c.config.Credentials.GetCredentials()
if err != nil {
return nil, err
}
if creds.Token != "" {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", creds.Token))
}
if creds.Username != "" && creds.Password != "" {
req.SetBasicAuth(creds.Username, creds.Password)
}
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.config.HTTPClient.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode >= 200 && resp.StatusCode < 400 {
if resp.ContentLength != 0 {
body, err := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
return nil, err
}
return body, nil
}
return nil, nil
}
return nil, c.handleError(resp)
}
// ErrResponse is a generic error object using wercker api conventions.
type ErrResponse struct {
StatusCode int `json:"statusCode"`
StatusMessage string `json:"error"`
Message string `json:"message"`
}
// Error returns the wercker error message
func (e *ErrResponse) Error() string {
return e.Message
}
func (c *Client) handleError(resp *http.Response) error {
if resp.ContentLength > 0 {
body, err := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
// Continue if we were able to read the response
if err == nil {
v := &ErrResponse{}
err := json.Unmarshal(body, v)
// Continue if we were able to unmarshal the JSON
if err == nil {
return v
}
}
}
return fmt.Errorf("Unable to parse error response (status code: %d)", resp.StatusCode)
}
func expandURL(urlTemplate *uritemplates.UriTemplate, urlModel interface{}) (string, error) {
var m map[string]interface{}
var ok bool
var path string
var err error
if urlModel != nil {
m, ok = struct2map(urlModel)
if !ok {
return "", errors.New("Invalid URL model")
}
if m != nil {
path, err = urlTemplate.Expand(m)
if err != nil {
return "", err
}
}
} else {
path = urlTemplate.String()
}
return path, nil
}
| [
6
] |
package gospelmaria
import (
"context"
"database/sql"
"errors"
"fmt"
"strings"
"time"
"github.com/VividCortex/ewma"
"github.com/jmalloc/gospel/src/gospel"
"github.com/jmalloc/gospel/src/internal/metrics"
"github.com/jmalloc/gospel/src/internal/options"
"github.com/jmalloc/twelf/src/twelf"
"golang.org/x/time/rate"
)
const (
// averageLatencyAge is average age of samples to keep when computing the
// average latency. A sample is taken after each poll.
//
// Averages are computed using an exponentially-weighted moving average.
// See https://github.com/VividCortex/ewma for more information.
averageLatencyAge = 20.0
)
// Reader is an interface for reading facts from a stream stored in MariaDB.
type Reader struct {
// stmt is a prepared statement used to query for facts.
// It accepts the stream offset as a parameter.
stmt *sql.Stmt
// logger is the target for debug logging. Readers do not perform general
// activity logging.
logger twelf.Logger
// facts is a channel on which facts are delivered to the caller of Next().
// A worker goroutine polls the database and delivers the facts to this
// channel.
facts chan gospel.Fact
// current is the fact returned by Get() until Next() is called again.
current *gospel.Fact
// next is the fact that will become "current" when Next() is called.
// If it is nil, no additional facts were available in the buffer on the
// previous call to Next().
next *gospel.Fact
// end is a signaling channel that is closed when the database polling
// goroutine fetches 0 facts.
end chan struct{}
// done is a signaling channel which is closed when the database polling
// goroutine returns. The error that caused the closure, if any, is sent to
// the channel before it closed. This means a pending call to Next() will
// return the error when it first occurs, but subsequent calls will return
// a more generic "reader is closed" error.
done chan error
// ctx is a context that is canceled when Close() is called, or when the
// database polling goroutine returns. It is used to abort any in-progress
// database queries or rate-limit pauses when the reader is closed.
//
// Context cancellation errors are not sent to the 'done' channel, so any
// pending Next() call will receive a generic "reader is closed" error.
ctx context.Context
cancel func()
// addr is the starting address for the next database poll.
addr gospel.Address
// globalLimit is a rate-limiter that limits the number of polling queries
// that can be performed each second. It is shared by all readers, and hence
// provides a global cap of the number of read queries per second.
globalLimit *rate.Limiter
// adaptiveLimit is a rate-limiter that is adjusted on-the-fly in an attempt
// to balance the number of database polls against the latency of facts.
// It is not shared by other readers.
adaptiveLimit *rate.Limiter
// acceptableLatency is the amount of latency that is generally acceptable
// for the purposes of this reader. The reader will attempt to maintain this
// latency by adjusting its polling rate.
acceptableLatency time.Duration
// starvationLatency is the amount of latency that is acceptable once the
// reader has reached the end of the stream and is "starving" for facts.
// This setting informs the minimum poll rate.
starvationLatency time.Duration
// instantaneousLatency is the latency computed from the facts returend by
// the most recent database poll. If there are no facts the latency is 0.
instantaneousLatency time.Duration
// averageLatency tracks the average latency of the last 10 database polls.
// The average latency is weighed against the acceptableLatency and
// starvationLatency values to decide how the poll rate is adjusted.
averageLatency ewma.MovingAverage
// debug contains several properties that are only relevant when the reader
// is using a debug logger.
debug *readerDebug
}
// readerDebug contains several properties that are only relevant when the
// reader is using a debug logger.
type readerDebug struct {
// opts is the options specified when opening the reader.
opts *options.ReaderOptions
// averagePollRate keeps track of the average polling rate, which can be
// substantially lower than the adaptive limit for slow readers.
averagePollRate *metrics.RateCounter
// averageFactRate keeps track of the average rate of delivery of facts.
averageFactRate *metrics.RateCounter
// previousPollRate is compared to the poll rate after each poll to
// determine whether a log message should be displayed.
previousPollRate rate.Limit
// muteEmptyPolls is true if the previous database poll did not return any
// facts. It is only used to mute repeated debug messages if there is no new
// information to report.
muteEmptyPolls bool
}
// errReaderClosed is an error returned by Next() when it is called on a closed
// reader, or when the reader is closed while a call to Next() is pending.
var errReaderClosed = errors.New("reader is closed")
// openReader returns a new reader that begins at addr.
func openReader(
ctx context.Context,
db *sql.DB,
storeID uint64,
addr gospel.Address,
limit *rate.Limiter,
logger twelf.Logger,
opts *options.ReaderOptions,
) (*Reader, error) {
// Note that runCtx is NOT derived from ctx, which is only used for the
// opening of the reader itself.
runCtx, cancel := context.WithCancel(context.Background())
accetableLatency := getAcceptableLatency(opts)
r := &Reader{
logger: logger,
facts: make(chan gospel.Fact, getReadBufferSize(opts)),
end: make(chan struct{}),
done: make(chan error, 1),
ctx: runCtx,
cancel: cancel,
addr: addr,
globalLimit: limit,
adaptiveLimit: rate.NewLimiter(rate.Every(accetableLatency), 1),
acceptableLatency: accetableLatency,
starvationLatency: getStarvationLatency(opts),
averageLatency: ewma.NewMovingAverage(averageLatencyAge),
}
if logger.IsDebug() {
r.debug = &readerDebug{
opts: opts,
averagePollRate: metrics.NewRateCounter(),
averageFactRate: metrics.NewRateCounter(),
}
}
if err := r.prepareStatement(ctx, db, storeID, opts); err != nil {
return nil, err
}
r.logInitialization()
go r.run()
return r, nil
}
// Next blocks until a fact is available for reading or ctx is canceled.
//
// If err is nil, the "current" fact is ready to be returned by Get().
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
//
// Note that nx is not always the address immediately following the fact
// returned by Get() - it may be "further ahead" in the stream, this skipping
// over any facts that the reader is not interested in.
func (r *Reader) Next(ctx context.Context) (nx gospel.Address, err error) {
nx, _, err = r.tryNext(ctx, nil)
return nx, err
}
// TryNext blocks until the next fact is available for reading, the end of
// stream is reached, or ctx is canceled.
//
// If ok is true, a new fact is available and is ready to be returned by
// Get(). ok is false if the current fact is the last known fact in the
// stream.
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
// nx is invalid if ok is false.
func (r *Reader) TryNext(ctx context.Context) (nx gospel.Address, ok bool, err error) {
return r.tryNext(ctx, r.end)
}
func (r *Reader) tryNext(ctx context.Context, end <-chan struct{}) (nx gospel.Address, ok bool, err error) {
if r.next == nil {
select {
case f := <-r.facts:
r.current = &f
ok = true
case <-end:
// no fact is available, return with ok == false
return
case <-ctx.Done():
err = ctx.Err()
return
case err = <-r.done:
if err == nil {
err = errReaderClosed
}
return
}
} else {
r.current = r.next
r.next = nil
ok = true
}
// Perform a non-blocking lookahead to see if we have the next fact already.
select {
case f := <-r.facts:
r.next = &f
nx = r.next.Addr
default:
// assume next is literally the next fact on the stream
nx = r.current.Addr.Next()
}
return
}
// Get returns the "current" fact.
//
// It panics if Next() has not been called.
// Get() returns the same Fact until Next() is called again.
func (r *Reader) Get() gospel.Fact {
if r.current == nil {
panic("Next() must be called before calling Get()")
}
return *r.current
}
// Close closes the reader.
func (r *Reader) Close() error {
select {
case err := <-r.done:
return err
default:
r.cancel()
return <-r.done
}
}
// prepareStatement creates r.stmt, an SQL prepared statement used to poll
// for new facts.
func (r *Reader) prepareStatement(
ctx context.Context,
db *sql.DB,
storeID uint64,
opts *options.ReaderOptions,
) error {
filter := ""
if opts.FilterByEventType {
types := strings.Join(escapeStrings(opts.EventTypes), `, `)
filter = `AND e.event_type IN (` + types + `)`
}
query := fmt.Sprintf(
`SELECT
f.offset,
f.time,
e.event_type,
e.content_type,
e.body,
CURRENT_TIMESTAMP(6)
FROM fact AS f
INNER JOIN event AS e
ON e.id = f.event_id
%s
WHERE f.store_id = %d
AND f.stream = %s
AND f.offset >= ?
ORDER BY offset
LIMIT %d`,
filter,
storeID,
escapeString(r.addr.Stream),
cap(r.facts),
)
stmt, err := db.PrepareContext(ctx, query)
if err != nil {
return err
}
r.stmt = stmt
return nil
}
// run polls the database for facts and sends them to r.facts until r.ctx is
// canceled or an error occurs.
func (r *Reader) run() {
defer r.cancel()
defer close(r.done)
defer r.stmt.Close()
var err error
for err == nil {
err = r.tick()
}
if err != context.Canceled {
r.done <- err
}
}
// tick executes one pass of the worker goroutine.
func (r *Reader) tick() error {
if err := r.globalLimit.Wait(r.ctx); err != nil {
return err
}
if err := r.adaptiveLimit.Wait(r.ctx); err != nil {
return err
}
count, err := r.poll()
if err != nil {
return err
}
r.adjustRate()
r.logPoll(count)
return nil
}
// fetch queries the database for facts beginning at r.addr.
func (r *Reader) poll() (int, error) {
rows, err := r.stmt.QueryContext(
r.ctx,
r.addr.Offset,
)
if err != nil {
return 0, err
}
defer rows.Close()
f := gospel.Fact{
Addr: r.addr,
}
count := 0
var first, now time.Time
for rows.Next() {
if err := rows.Scan(
&f.Addr.Offset,
&f.Time,
&f.Event.EventType,
&f.Event.ContentType,
&f.Event.Body,
&now,
); err != nil {
return count, err
}
select {
case r.facts <- f:
case <-r.ctx.Done():
return count, r.ctx.Err()
}
r.addr = f.Addr.Next()
// keep the time of the first fact in the result to compute the maximum
// instantaneous latency for this poll.
if count == 0 {
first = f.Time
}
count++
if r.debug != nil {
r.debug.averageFactRate.Tick()
}
}
// TODO: this doesn't account for the time spent waiting to write to r.facts.
r.instantaneousLatency = now.Sub(first)
r.averageLatency.Add(r.instantaneousLatency.Seconds())
if count == 0 {
select {
case r.end <- struct{}{}:
default:
}
}
return count, nil
}
// setRate sets the adaptive polling rate, capped between the mininum (set by
// r.starvationLatency) and the maximum (set by the global rate limit).
func (r *Reader) setRate(lim rate.Limit) bool {
min := rate.Every(r.starvationLatency)
max := r.globalLimit.Limit()
if lim < min {
lim = min
} else if lim > max {
lim = max
}
prev := r.adaptiveLimit.Limit()
if lim != prev {
r.adaptiveLimit.SetLimit(lim)
return true
}
return false
}
// adjustRate updates the adaptive poll rate in an attempt to balance database
// poll frequency with latency.
func (r *Reader) adjustRate() bool {
latency := r.effectiveLatency()
// headroom is the difference between the acceptable latency and the
// effective latency. If the headroom is positive, we're doing 'better' than
// the acceptable latency and can backoff the poll rate.
headroom := r.acceptableLatency - latency
// don't back off if our headroom is less than 25%
// if headroom > 0 && headroom < r.acceptableLatency/25 {
// return false
// }
// Get the current rate in terms of an interval.
currentInterval := metrics.RateToDuration(
r.adaptiveLimit.Limit(),
)
return r.setRate(
rate.Every(currentInterval + headroom),
)
}
// effectiveLatency returns the latency used to adjust the poll rate.
//
// The rolling average needs to be primed with several samples before the
// average is available, until then it reports zero, in which case the
// instantaneousLatency value is used instead.
func (r *Reader) effectiveLatency() time.Duration {
latency := r.averageLatency.Value()
if latency == 0 {
return r.instantaneousLatency
}
return time.Duration(
latency * float64(time.Second),
)
}
// logInitialization logs a debug message describing the reader settings.
func (r *Reader) logInitialization() {
if !r.logger.IsDebug() {
return
}
filter := "*"
if r.debug.opts.FilterByEventType {
filter = strings.Join(r.debug.opts.EventTypes, ", ")
}
r.logger.Debug(
"[reader %p] %s | global poll limit: %s | acceptable latency: %s | starvation latency: %s | read-buffer: %d | filter: %s",
r,
r.addr,
formatRate(r.globalLimit.Limit()),
formatDuration(r.acceptableLatency),
formatDuration(r.starvationLatency),
getReadBufferSize(r.debug.opts),
filter,
)
}
// logPoll logs a debug message containing metrics for the previous poll and
// adjustments to the adaptive poll rate.
func (r *Reader) logPoll(count int) {
if r.debug == nil {
return
}
r.debug.averagePollRate.Tick()
pollRate := r.adaptiveLimit.Limit()
if pollRate == r.debug.previousPollRate &&
count == 0 && r.debug.muteEmptyPolls {
return
}
r.debug.muteEmptyPolls = count == 0
r.logger.Debug(
"[reader %p] %s | fetch: %3d %s | queue: %3d/%3d | adaptive poll: %s | avg poll: %s | latency: %s",
r,
r.addr,
count,
formatRate(rate.Limit(r.debug.averageFactRate.Rate())),
len(r.facts),
cap(r.facts),
formatRate(r.adaptiveLimit.Limit()),
formatRate(rate.Limit(r.debug.averagePollRate.Rate())),
formatDuration(r.effectiveLatency()),
)
r.debug.previousPollRate = pollRate
}
// formatRate formats a rate limit for display in reader debug logs.
func formatRate(r rate.Limit) string {
if r == 0 {
// "500.00/s 2.00ms"
return " ?.??/s ?.??µs"
}
d := metrics.RateToDuration(r)
return fmt.Sprintf(
"%6.02f/s %s",
r,
formatDuration(d),
)
}
// formatDuration formats a duration for display in reader debug logs.
func formatDuration(d time.Duration) string {
if d >= time.Hour {
return fmt.Sprintf("%6.02fh ", d.Seconds()/3600)
} else if d >= time.Minute {
return fmt.Sprintf("%6.02fm ", d.Seconds()/60)
} else if d >= time.Second {
return fmt.Sprintf("%6.02fs ", d.Seconds())
} else if d >= time.Millisecond {
return fmt.Sprintf("%6.02fms", d.Seconds()/time.Millisecond.Seconds())
}
return fmt.Sprintf("%6.02fµs", d.Seconds()/time.Microsecond.Seconds())
}
| [
5
] |
package main
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
"github.com/revand/App_Go_Larave_Angular_TEST/backend/go/awards"
"github.com/revand/App_Go_Larave_Angular_TEST/backend/go/users"
"github.com/revand/App_Go_Larave_Angular_TEST/backend/go/common"
"github.com/revand/App_Go_Larave_Angular_TEST/backend/go/redis"
// "github.com/go-redis/redis"
)
func Migrate(db *gorm.DB) {
// users.AutoMigrate()
db.AutoMigrate(&awards.Awards{}) //generate table Awards
db.AutoMigrate(&users.Users{}) //generate table Users
// db.AutoMigrate(&articles.TagModel{})
// db.AutoMigrate(&articles.FavoriteModel{})
// db.AutoMigrate(&articles.ArticleUserModel{})
// db.AutoMigrate(&articles.CommentModel{})
}
type Author struct {
Name string `json:"name"`
Age int `json:"age"`
}
func main() {
// c, err := redis.Dial("tcp", "redis:6379")
//Conection db
db := common.Init()
Migrate(db)
defer db.Close()
r := gin.Default()
MakeRoutes(r)
v1 := r.Group("/api")
// NO TOKEN
awards.AwardsAuthed(v1.Group("/awards"))
users.UsersRegister(v1.Group("/users"))
v1.Use(users.AuthMiddleware(false))
//redis
redis.Routers(v1.Group("/redis"))
// SI TOKEN
v1.Use(users.AuthMiddleware(true))
users.UserRegister(v1.Group("/user"))
fmt.Printf("0.0.0.0:3000")
r.Run(":3000")
}
func MakeRoutes(r *gin.Engine) {
cors := func(c *gin.Context) {
c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
c.Writer.Header().Set("Access-Control-Allow-Credentials", "true")
c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With")
c.Writer.Header().Set("Access-Control-Allow-Methods", "*")
c.Writer.Header().Set("Content-Type", "application/json")
if c.Request.Method == "OPTIONS" {
c.AbortWithStatus(200)
}
c.Next()
}
r.Use(cors)
}
| [
3
] |
package main
import (
"fmt"
)
func fibc(N int) (int, int) {
a0, a1 := 1, 0
b0, b1 := 0, 1
if N == 0 {
return a0, a1
} else if N == 1 {
return b0, b1
}
var c0, c1 int
for i := 2; i <= N; i++ {
c0, c1 = a0+b0, a1+b1
a0, a1 = b0, b1
b0, b1 = c0, c1
}
return c0, c1
}
func main() {
var T, N int
fmt.Scan(&T)
for i := 0; i < T; i++ {
fmt.Scan(&N)
c0, c1 := fibc(N)
fmt.Println(c0, c1)
}
}
| [
2
] |
package main
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
//"github.com/aws/aws-sdk-go/service/ec2"
"flag"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/cloudwatch"
. "github.com/tnantoka/chatsworth"
"io/ioutil"
"log"
"strings"
"time"
)
func main() {
var p = flag.String("p", "./profiles", "AWS Profiles")
var k = flag.String("k", "./.api_token", "Chatwork API Token")
var r = flag.String("r", "", "ChatWork Room ID")
flag.Parse()
cw := Chatsworth{
RoomID: *r,
APIToken: loadToken(*k),
}
cw.PostMessage(buildMessage(*p))
}
func loadToken(file string) string {
token, err := ioutil.ReadFile(file)
if err != nil {
log.Fatal(err)
}
return string(token)
}
func buildMessage(file string) string {
profiles, err := ioutil.ReadFile(file)
if err != nil {
log.Fatal(err)
}
var validProfiles []string
for _, profile := range strings.Split(string(profiles), "\n") {
if len(profile) > 0 {
validProfiles = append(validProfiles, profile)
}
}
messageChan := fetchCharges(validProfiles)
message := "[info][title]AWSの課金額[/title]"
for i := 0; i < len(validProfiles); i++ {
m := <-messageChan
fmt.Print(m)
message += m
}
message += "[/info]"
return message
}
func fetchCharges(profiles []string) <-chan string {
messageChan := make(chan string)
for _, profile := range profiles {
go func(profile string) {
config := aws.Config{Region: "us-east-1"}
config.Credentials = credentials.NewSharedCredentials("", profile)
message := profile + ": " + fetchCharge(config) + "ドル\n"
messageChan <- message
}(profile)
}
return messageChan
}
func fetchCharge(config aws.Config) string {
dimension := cloudwatch.Dimension{
Name: aws.String("Currency"),
Value: aws.String("USD"),
}
svc := cloudwatch.New(&config)
input := cloudwatch.GetMetricStatisticsInput{
Dimensions: []*cloudwatch.Dimension{&dimension},
StartTime: aws.Time(time.Now().Add(-24 * time.Hour)),
EndTime: aws.Time(time.Now()),
MetricName: aws.String("EstimatedCharges"),
Namespace: aws.String("AWS/Billing"),
Period: aws.Long(60),
Statistics: []*string{aws.String("Maximum")},
//Unit: "",
}
output, err := svc.GetMetricStatistics(&input)
if err != nil {
log.Fatal(err)
}
var dp = output.Datapoints[0]
return fmt.Sprint(*dp.Maximum)
}
| [
3
] |
package content
//CmdEventSetupUse is a command to setup event stream
const CmdEventSetupUse = "setup"
//CmdEventSetupShort is the short version description for vss event setup command
const CmdEventSetupShort = "Setup event stream"
//CmdEventSetupLong is the long version description for vss event setup command
const CmdEventSetupLong = "Run this command to setup event stream. " +
"It will create a CloudFormation stack with an event rule and SNS topic. " +
"You will need to run this script for each cloud account. " +
"Make sure your aws credentials have been configured before run this command."
//CmdEventUse is command for event stream
const CmdEventUse = "event"
//CmdEventShort is the short version description for vss event command
const CmdEventShort = "Manage event stream"
//CmdEventLong is the long version description for vss event command
const CmdEventLong = "Manage event stream"
//CmdEventSetupExample is the use case for command event setup
const CmdEventSetupExample = ` vss event setup
vss event setup --aws-profile YOUR_AWS_PROFILE --cloud-id YOUR_CLOUD_ID`
//CmdEventRemoveUse is the command name for command event remove
const CmdEventRemoveUse = "remove"
//CmdEventRemoveShort is the short version description for vss event remove command
const CmdEventRemoveShort = "Remove event stream"
//CmdEventRemoveLong is the long version description for vss event remove command
const CmdEventRemoveLong = "Run this command to remove event stream." +
"You will need to run this script for each cloud account." +
"Make sure your aws credentials have been configured before run this command."
//CmdEventRemoveExample is the use case for command event remove
const CmdEventRemoveExample = `vss event remove
vss event remove --aws-profile YOUR_AWS_PROFILE --cloud-id YOUR_CLOUD_ID`
const CmdEventAuthFile = "auth-file"
const CmdEventAuthFileDescription = "auth file for azure authentication"
const CmdEventRegion = "region"
const CmdEventRegionDescription = "The region in which you'd like to create Azure resource group in"
| [
3
] |
package metric_parser
import (
//"github.com/Cepave/open-falcon-backend/common/utils"
//"log"
)
type metricType byte
const (
MetricMax metricType = 1
MetricMin metricType = 2
MetricAvg metricType = 3
MetricMed metricType = 4
MetricMdev metricType = 5
MetricLoss metricType = 6
MetricCount metricType = 7
MetricPckSent metricType = 8
MetricPckReceived metricType = 9
MetricNumAgent metricType = 10
MetricNumTarget metricType = 11
)
var mapOfMetric = map[string]metricType {
"max": MetricMax,
"min": MetricMin,
"avg": MetricAvg,
"med": MetricMed,
"mdev": MetricMdev,
"loss": MetricLoss,
"count": MetricCount,
"pck_sent": MetricPckSent,
"pck_received": MetricPckReceived,
"num_agent": MetricNumAgent,
"num_target": MetricNumTarget,
}
| [
3
] |
package proxies
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
datadog "github.com/DataDog/datadog-api-client-go/api/v1/datadog"
"github.com/vivasaayi/cloudrover/utililties"
)
type DataDogProxy struct {
ctx context.Context
apiClient *datadog.APIClient
apiKey string
appKey string
}
func GetDataDogProxy() *DataDogProxy {
ddp := DataDogProxy{}
ddp.ctx = datadog.NewDefaultContext(context.Background())
configuration := datadog.NewConfiguration()
ddp.apiClient = datadog.NewAPIClient(configuration)
ddp.apiKey = utililties.GetStringEnvVar("DD_API_KEY", "", true)
ddp.appKey = utililties.GetStringEnvVar("DD_APP_KEY", "", true)
return &ddp
}
func (ddp *DataDogProxy) GetEvents(
source string,
startTime int64,
endTime int64,
eventPriority string) datadog.EventListResponse {
priority := datadog.EventPriority(eventPriority)
sources := source
// tags := ""
unaggregated := true
excludeAggregate := true
// page := int32(56)
optionalParams := datadog.ListEventsOptionalParameters{
Priority: &priority,
Sources: &sources,
// Tags: &tags,
Unaggregated: &unaggregated,
ExcludeAggregate: &excludeAggregate,
// Page: &page,
}
resp, r, err := ddp.apiClient.EventsApi.ListEvents(ddp.ctx, startTime, endTime, optionalParams)
if err != nil {
fmt.Fprintf(os.Stderr, "Error when calling `EventsApi.ListEvents`: %v\n", err)
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
}
return resp
}
func (ddp *DataDogProxy) GetMonitors() []datadog.Monitor {
groupStates := "all" // string | When specified, shows additional information about the group states. Choose one or more from `all`, `alert`, `warn`, and `no data`. (optional)
// name := "name_example" // string | A string to filter monitors by name. (optional)
// tags := "tags_example" // string | A comma separated list indicating what tags, if any, should be used to filter the list of monitors by scope. For example, `host:host0`. (optional)
// monitorTags := "monitorTags_example" // string | A comma separated list indicating what service and/or custom tags, if any, should be used to filter the list of monitors. Tags created in the Datadog UI automatically have the service key prepended. For example, `service:my-app`. (optional)
withDowntimes := true // bool | If this argument is set to true, then the returned data includes all current downtimes for each monitor. (optional)
idOffset := int64(789) // int64 | Monitor ID offset. (optional)
page := int64(789) // int64 | The page to start paginating from. If this argument is not specified, the request returns all monitors without pagination. (optional)
pageSize := int32(56) // int32 | The number of monitors to return per page. If the page argument is not specified, the default behavior returns all monitors without a `page_size` limit. However, if page is specified and `page_size` is not, the argument defaults to 100. (optional)
optionalParams := datadog.ListMonitorsOptionalParameters{
GroupStates: &groupStates,
// Name: &name,
// Tags: &tags,
// MonitorTags: &monitorTags,
WithDowntimes: &withDowntimes,
IdOffset: &idOffset,
Page: &page,
PageSize: &pageSize,
}
resp, r, err := ddp.apiClient.MonitorsApi.ListMonitors(ddp.ctx, optionalParams)
if err != nil {
fmt.Fprintf(os.Stderr, "Error when calling `MonitorsApi.ListMonitors`: %v\n", err)
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
}
return resp
}
func (ddp *DataDogProxy) SearchMonitors() DDMonitorSearchResponse {
client := &http.Client{}
req, err := http.NewRequest("GET",
`https://api.datadoghq.com/api/v1/monitor/search?query=status:alert&per_page=1000`,
nil,
)
if err != nil {
fmt.Println("Error occured when retrieving the alerts")
fmt.Println(err)
}
req.Header.Add("Content-Type", `application/json`)
req.Header.Add("DD-API-KEY", ddp.apiKey)
req.Header.Add("DD-APPLICATION-KEY", ddp.appKey)
resp, err := client.Do(req)
if err != nil {
fmt.Println("Error occured when making http request")
}
fmt.Println(resp)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err.Error())
}
fmt.Println(string(body))
result := DDMonitorSearchResponse{}
err = json.Unmarshal(body, &result)
if err != nil {
fmt.Println("Error occured when parsing search response")
fmt.Println(err)
}
return result
}
| [
6
] |
package server
import (
"net/http"
"log"
"fmt"
)
var mymux *http.ServeMux
const (
mailAddress string = "http://121.40.190.238:1280"
)
func Run() {
mymux = http.NewServeMux()
//绑定路由
bind()
err := http.ListenAndServe(":1280", mymux) //设置监听的端口
if err != nil {
log.Fatal("ListenAndServe: ", err)
}
}
func TestDatabase() {
var user User
users := user.QueryAll()
var le = len(users)
for i := 0; i < le; i++ {
fmt.Println(users[i].contents["id"])
fmt.Println(users[i].contents["username"])
fmt.Println("xxxxxxxxxxxxxxx")
}
}
func TestUsers() {
var user User
users := user.QueryAll()
var le = len(users)
for i := 0; i < le; i++ {
fmt.Println(users[i].contents["id"])
fmt.Println(users[i].contents["username"])
fmt.Println("xxxxxxxxxxxxxxx")
}
} | [
3
] |
package main
import (
"context"
"fmt"
"net"
"time"
"github.com/envoyproxy/go-control-plane/pkg/cache/types"
"github.com/envoyproxy/go-control-plane/pkg/cache/v2"
"github.com/golang/glog"
"github.com/golang/protobuf/ptypes"
"google.golang.org/grpc"
api "github.com/envoyproxy/go-control-plane/envoy/api/v2"
core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
endpoint "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2"
xds "github.com/envoyproxy/go-control-plane/pkg/server/v2"
)
type ADDR struct {
Address string
Port uint32
}
type NodeConfig struct {
node *core.Node
endpoints []types.Resource
clusters []types.Resource
routes []types.Resource
listeners []types.Resource
runtimes []types.Resource
}
//implement cache.NodeHash
func (n NodeConfig) ID(node *core.Node) string {
return node.GetId()
}
func ClusterStatic(name string, address []ADDR) *api.Cluster {
lbEndpoints := make([]*endpoint.LbEndpoint, len(address))
for idx, addr := range address {
lbEndpoint := &endpoint.LbEndpoint{
HostIdentifier: &endpoint.LbEndpoint_Endpoint{
Endpoint: &endpoint.Endpoint{
Address: &core.Address{
Address: &core.Address_SocketAddress{
SocketAddress: &core.SocketAddress{
Protocol: core.SocketAddress_TCP,
Address: addr.Address,
PortSpecifier: &core.SocketAddress_PortValue{
PortValue: addr.Port,
},
},
},
},
},
},
}
lbEndpoints[idx] = lbEndpoint
}
localityLbEndpoints := &endpoint.LocalityLbEndpoints{
LbEndpoints: lbEndpoints,
}
endpoints := make([]*endpoint.LocalityLbEndpoints, 0)
endpoints = append(endpoints, localityLbEndpoints)
clusterLoadAssignment := &api.ClusterLoadAssignment{
ClusterName: name,
Endpoints: endpoints,
}
cluster := &api.Cluster{
Name: name,
AltStatName: name,
ClusterDiscoveryType: &api.Cluster_Type{
Type: api.Cluster_STATIC,
},
EdsClusterConfig: nil,
ConnectTimeout: ptypes.DurationProto(1 * time.Second),
PerConnectionBufferLimitBytes: nil, // default 1MB
LbPolicy: api.Cluster_ROUND_ROBIN,
LoadAssignment: clusterLoadAssignment,
}
return cluster
}
func UpdateSnapshotCache(s cache.SnapshotCache, n *NodeConfig, version string) {
err := s.SetSnapshot(n.ID(n.node), cache.NewSnapshot(version, n.endpoints, n.clusters, n.routes, n.listeners, n.runtimes))
if err != nil {
glog.Error(err)
}
}
//func Update_SnapshotCache(s cache.SnapshotCache, n *NodeConfig
func main() {
snapshotCache := cache.NewSnapshotCache(false, cache.IDHash{}, nil)
server := xds.NewServer(context.Background(), snapshotCache, nil)
grpcServer := grpc.NewServer()
lis, _ := net.Listen("tcp", ":5678")
discovery.RegisterAggregatedDiscoveryServiceServer(grpcServer, server)
api.RegisterEndpointDiscoveryServiceServer(grpcServer, server)
api.RegisterClusterDiscoveryServiceServer(grpcServer, server)
api.RegisterRouteDiscoveryServiceServer(grpcServer, server)
api.RegisterListenerDiscoveryServiceServer(grpcServer, server)
go func() {
if err := grpcServer.Serve(lis); err != nil {
glog.Error(err)
}
}()
node := &core.Node{ // 根据yaml文件中定义的id和名称
Id: "envoy-64.58",
Cluster: "test",
}
nodeConf := &NodeConfig{
node: node,
endpoints: []types.Resource{},
clusters: []types.Resource{},
routes: []types.Resource{},
listeners: []types.Resource{},
runtimes: []types.Resource{},
}
input := ""
{
clusterName := "Cluster_With_Static_Endpoint"
fmt.Printf("Enter to update: %s", clusterName)
_, _ = fmt.Scanf("\n", &input)
var addrs []ADDR
addrs = append(addrs, ADDR{
Address: "127.0.0.1",
Port: 8081,
})
cluster := ClusterStatic(clusterName, addrs)
nodeConf.clusters = append(nodeConf.clusters, cluster)
UpdateSnapshotCache(snapshotCache, nodeConf, time.Now().String())
glog.Info(clusterName + " updated")
}
select {}
}
| [
3
] |
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"time"
"github.com/juju/loggo"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/bson/primitive"
"github.com/mongodb/mongo-go-driver/mongo"
"github.com/mongodb/mongo-go-driver/mongo/options"
"github.com/segmentio/kafka-go"
"gopkg.in/yaml.v2"
)
type Config struct {
KafkaBroker string `yaml:"kafkaBroker"`
MongoUri string `yaml:"mongoUri"`
Database string `yaml:"database"`
// By limiting ourselves to these collections I can assume that a
// topic already exists and has messages. If that assumption
// changes, we'll need to check if a topic exists, be able to
// create if it doesn't and then start the mongo changestream
// watcher from the start of that collection.
Collections []string `yaml:"collections"`
LogLevel string `yaml:"logLevel"`
}
var logger = loggo.GetLogger("main")
func main() {
logger.SetLogLevel(loggo.INFO)
logger.Infof("Started")
dat, err := ioutil.ReadFile("config.yml")
if err != nil {
logger.Errorf(err.Error())
os.Exit(1)
}
config := Config{}
err = yaml.Unmarshal(dat, &config)
if err != nil {
logger.Errorf(err.Error())
os.Exit(1)
}
level, ok := loggo.ParseLevel(config.LogLevel)
if ok {
logger.SetLogLevel(level)
} else {
logger.Warningf("Log level %s is unknown, using INFO", config.LogLevel)
}
db, err := openDatabase(config)
if err != nil {
logger.Errorf(err.Error())
os.Exit(1)
}
expectedCollections := make(map[string]bool)
for i := 0; i < len(config.Collections); i++ {
expectedCollections[config.Collections[i]] = true
}
collections := make(map[string]*mongo.Collection)
channel := make(chan string)
for {
logger.Debugf("Listing all collections")
err = startWatchingNewCollections(db, channel, expectedCollections, collections, config.KafkaBroker)
if err != nil {
// TODO: probably want to die if we error enough times
logger.Errorf(err.Error())
}
sleepAndCleanup(channel, collections)
}
}
func openDatabase(config Config) (*mongo.Database, error) {
client, err := mongo.NewClient(config.MongoUri)
if err != nil {
return nil, err
}
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
err = client.Connect(ctx)
if err != nil {
return nil, err
}
db := client.Database(config.Database)
return db, nil
}
func startWatchingNewCollections(db *mongo.Database, c chan string, expectedCollections map[string]bool, collections map[string]*mongo.Collection, broker string) error {
cursor, err := db.ListCollections(context.Background(), bson.D{})
if err != nil {
return err
}
for cursor.Next(context.Background()) {
var result bson.M
err := cursor.Decode(&result)
if err != nil {
logger.Errorf(err.Error())
continue
}
collectionName := result["name"].(string)
_, ok := expectedCollections[collectionName]
if !ok {
logger.Infof("Collection %s is unexpected", collectionName)
continue
}
_, ok = collections[collectionName]
if ok {
continue
}
collection := db.Collection(collectionName)
collections[collectionName] = collection
go WatchCollection(broker, collection, c)
}
return nil
}
func sleepAndCleanup(channel chan string, collections map[string]*mongo.Collection) {
for {
shouldBreak := false
select {
case doneCollection := <-channel:
// We've stopped watching this collection so it needs
// to be removed from the collections map so that
// on the next pass through we can re-add it
delete(collections, doneCollection)
case <-time.After(15 * time.Second):
shouldBreak = true
}
if shouldBreak {
break
}
}
}
// Opens up a changestream cursor on the collection and writes new
// documents to the kafka broker.
// If there is an error, we send a message to the channel indicating
// that we've stopped watching
func WatchCollection(broker string, collection *mongo.Collection, c chan string) {
defer func() { logger.Infof("Stopping watcher for %s.%s", collection.Database().Name(), collection.Name()) }()
// The channel is used to indicate that an error has happened
// watching the collection. Hopefully the main goroutine will be
// able to restart us.
defer func() { c <- collection.Name() }()
logger.Infof("Watching %s.%s", collection.Database().Name(), collection.Name())
cs := options.ChangeStream()
topic := fmt.Sprintf("mongo_%s_%s", collection.Database().Name(), collection.Name())
lastMessage, err := getLastMessage(broker, topic)
if err != nil {
logger.Errorf(err.Error())
return
}
payload := lastMessage["payload"].(map[string]interface{})
token, ok := payload["resumeToken"]
if ok {
logger.Debugf("Using resumeToken")
cs.SetResumeAfter(bson.M{"_data": token})
} else {
logger.Debugf("Using timestamp")
timestamp := uint32(payload["timestamp"].(float64))
inc := uint32(payload["order"].(float64))
// inc is a counter so its safe to just increment one to get the next document.
// If we don't increment one, we get the same document that was already in kafka.
// https://docs.mongodb.com/manual/reference/bson-types/#timestamps
cs.SetStartAtOperationTime(&primitive.Timestamp{timestamp, inc + 1})
}
cursor, err := collection.Watch(context.Background(), mongo.Pipeline{}, cs)
if err != nil {
logger.Errorf(err.Error())
return
}
w := kafka.NewWriter(kafka.WriterConfig{
Brokers: []string{broker},
Topic: topic,
Balancer: &kafka.LeastBytes{},
})
defer w.Close()
logger.Debugf("Waiting for documents on: %s", collection.Name())
for cursor.Next(context.Background()) {
logger.Debugf("New document recieved for %s", collection.Name())
var item bson.M
cursor.Decode(&item)
operationType := item["operationType"].(string)
if operationType != "insert" {
logger.Warningf("Document has operationType %s, expected insert", operationType)
continue
}
// Note that this needs to be synchronous. If this was
// asynchronous and something goes wrong it might be possible
// for event B to get into kafka and not event A and so event
// A would be lost forever
msg, err := getMessage(item)
if err != nil {
logger.Errorf(err.Error())
return
}
err = w.WriteMessages(context.Background(), *msg)
if err != nil {
logger.Errorf(err.Error())
return
}
logger.Debugf("Sent message %s to %s", string(msg.Value), topic)
}
}
// Returns the last message on kafka for the given topic in partition 0.
// This probably only works correctly if there is only one partition
func getLastMessage(broker string, topic string) (map[string]interface{}, error) {
// TODO: this is so much work just to get one message. Maybe there is a better way?
logger.Debugf("Getting last message for %s", topic)
conn, err := kafka.DialLeader(context.Background(), "tcp", broker, topic, 0)
first, last, err := conn.ReadOffsets()
logger.Debugf("For %s: first: %d, last: %d", topic, first, last)
if last == 0 {
return nil, errors.New(fmt.Sprintf("Topic %s doesn't have any messages", topic))
}
// Would be nice if I could re-use the connection from above
// but that is not part of the library
r := kafka.NewReader(kafka.ReaderConfig{
Brokers: []string{broker},
Topic: topic,
Partition: 0,
MinBytes: 0,
MaxBytes: 10e6, // 10MB
})
r.SetOffset(last - 1)
m, err := r.ReadMessage(context.Background())
if err != nil {
logger.Errorf(err.Error())
return nil, err
}
var f interface{}
err = json.Unmarshal(m.Value, &f)
return f.(map[string]interface{}), nil
}
// Converts and serializes the input document and then
// sends it along to kafka.
func getMessage(doc bson.M) (*kafka.Message, error) {
msgValue, err := ConvertToOldFormat(doc)
if err != nil {
return nil, err
}
output, err := json.Marshal(msgValue)
if err != nil {
return nil, err
}
msg := kafka.Message{Value: output}
return &msg, nil
}
type connectSchema struct {
Schema payloadSchema `json:"schema"`
Payload payloadData `json:"payload"`
}
type payloadSchema struct {
Type string `json:"type"`
Optional bool `json:"optional"`
Fields []field `json:"fields"`
Name string `json:"name"`
}
type field struct {
Type string `json:"type"`
Optional bool `json:"optional"`
Field string `json:"field"`
}
type payloadData struct {
Timestamp uint32 `json:"timestamp"`
Order uint32 `json:"order"`
Operation string `json:"operation"`
Database string `json:"database"`
Object string `json:"object"`
ResumeToken string `json:"resumeToken"`
}
func ConvertToOldFormat(doc bson.M) (connectSchema, error) {
namespace := doc["ns"].(bson.M)
name := fmt.Sprintf("mongodbschema_%s_%s", namespace["db"], namespace["coll"])
timestamp := doc["clusterTime"].(primitive.Timestamp)
fullDocument := doc["fullDocument"].(bson.M)
// This transformation is to remain compatible with the previous
// oplog reader
fullDocument["_id"] = bson.M{"$oid": fullDocument["_id"]}
documentBytes, err := json.Marshal(fullDocument)
if err != nil {
logger.Errorf(err.Error())
return connectSchema{}, err
}
resumeToken := doc["_id"].(bson.M)["_data"].(string)
logger.Debugf(resumeToken)
// The whole connectSchema will also be json encoded
// and so we need convert the bytes into a string
// otherwise the []bytes get encoded using base64
documentStr := string(documentBytes)
results := connectSchema{
Schema: payloadSchema{
Type: "struct",
Optional: false,
Name: name,
Fields: []field{
field{"int32", true, "timestamp"},
field{"int32", true, "order"},
field{"string", true, "operation"},
field{"string", true, "database"},
field{"string", true, "object"},
field{"string", true, "resumeToken"}}},
Payload: payloadData{
Timestamp: timestamp.T,
Order: timestamp.I,
Operation: "i",
Database: fmt.Sprintf("%s.%s", namespace["db"], namespace["coll"]),
Object: documentStr,
ResumeToken: resumeToken}}
return results, nil
}
| [
6
] |
package deviceactionapi
import (
log "github.com/cihub/seelog"
)
// ActionRobotCleanerReq 发送MQTT命令的BODY
type ActionRobotCleanerReq struct {
CleanSpeed CleanSpeedOption `json:"clean_speed,omitempty"`
FindMe FindMeOption `json:"find_me,omitempty"`
StopClean StopCleanOption `json:"stop_clean,omitempty"`
TimerOption TimerOption `json:"timer_option,omitempty"`
TurnDirection TurnDirectionOption `json:"turn_direction,omitempty"`
WorkMode WorkModeOption `json:"work_mode,omitempty"`
}
// CleanSpeedOption hh
type CleanSpeedOption struct {
Speed int `json:"speed"` // 速度选项:0:日常 1:强力 2:地毯 3:静音
}
// FindMeOption hh.
type FindMeOption struct {
OnOff int `json:"on_off"` // 0: 关闭findme功能 扫地机停止发声; 1:开启findme功能,扫地机持续鸣叫
}
// StopCleanOption hh.
type StopCleanOption struct {
Stop int `json:"stop "` // 1:停止; 0:对应工作模式
}
// TimerOption hh.
type TimerOption struct {
ScheduleType string `json:"schedule_type "` //Timer调度类型, 目前可选值为 weekly, 后续版本会增加daily等
WeeklyOption WeeklyTimerOption `json:"weekly_option"`
}
// WeeklyTimerOption hh.
type WeeklyTimerOption struct {
StartHour int `json:"start_hour"` //开始执行的小时, 24小时制, 可选值为 0~23
StartMinute int `json:"start_minute"` //开始执行的分钟, 60分钟制, 可选值为 0~59 ,
Weekday int `json:"weekday"` //分别对应(Sunday=0, Monday=1, ..., Saturday=6)
}
// TurnDirectionOption hh.
type TurnDirectionOption struct {
Direction int `json:"direction"` //0:Forward, 1:Backward, 2:Left, 3:Right
}
// WorkModeOption hh.
type WorkModeOption struct {
Mode int `json:"mode"` //0:暂停, 1:定点, 2:自动, 3:返回充电, 4:沿边, 5:精扫
}
// ActionRobotCleanerResp 解析返回值
type ActionRobotCleanerResp struct {
Message string `json:"message"`
ResCode int `json:"res_code"` //1成功 0失败, 2001, Device Action Result is pending, need check status later
}
//---------------------------------------------------------------------------------------------------------------------
// NewActionRobotCleanerReq hh.
func NewActionRobotCleanerReq(option string, d ...int) interface{} {
if option == "findMe" {
log.Debugf("do something for %s", option)
}
if option == "turnDirection" {
log.Debugf("do something for %s", option)
}
req := &ActionRobotCleanerReq{
CleanSpeed: CleanSpeedOption{
Speed: d[0],
},
FindMe: FindMeOption{
OnOff: d[1],
},
StopClean: StopCleanOption{
Stop: d[2],
},
WorkMode: WorkModeOption{
Mode: d[3],
},
}
return req
}
| [
3
] |
package service
import (
"net/http"
"bytes"
"time"
"errors"
"io/ioutil"
"encoding/json"
"X/goappsrv/src/helper"
"X/goappsrv/src/model"
)
type airTableRecord struct {
Id string `json:"id"`
Fields guestField `json:"fields"`
}
type AirTableList struct {
Records []airTableRecord `json:"records"`
Offset string `json:"offset,omitempty"`
}
type qrImageStruct struct {
Id string `json:"id,omitempty"`
Url string `json:"url"`
FileName string `json:"filename"`
}
type roleOverview struct {
FileName string `json:"filename"`
Url string `json:"url"`
}
type guestField struct {
Name string `json:"Name,omitempty"`
FirstName string `json:"Guest First Name,omitempty"`
LastName string `json:"Guest Last Name,omitempty"`
PromDay string `json:"Prom Day,omitempty"`
Gender string `json:"Gender,omitempty"`
LOSupervision string `json:"Level of Supervision,omitempty"`
SNDescription string `json:"SN Description,omitempty"`
RespiteRoom []string `json:"Respite Room,omitempty"`
SpecificBuddy string `json:"Specific Buddy,omitempty"`
LOBathroom string `json:"Level of Bathroom Assistance,omitempty"`
Medication string `json:"Medication During Prom,omitempty"`
DRestriction []string `json:"Dietary Restrictions,omitempty"`
Sensory []string `json:"Sensory,omitempty"`
CherryOnTop string `json:"Cherry On Top,omitempty"`
Limo string `json:"Limo,omitempty"`
ContactName string `json:"Contact Name,omitempty"`
ContactNumber string `json:"Contact #,omitempty"`
ContactEmail string `json:"Email,omitempty"`
MailingAddress string `json:"Mailing Address,omitempty"`
Notes string `json:"NOTES,omitempty"`
ArrivalTime string `json:"Arrival Time,omitempty"`
PagerNumber string `json:"Pager Number,omitempty"`
TimeOfMed string `json:"Time of Medication,omitempty"`
LastModified string `json:"Last Modified,omitempty"`
QRValue string `json:"QR Value,omitempty"`
QRImage []qrImageStruct `json:"QR Image,omitempty"`
Teams string `json:"TEAMS,omitempty"`
Role string `json:"ROLE,omitempty"`
ROverview []roleOverview `json:"ROLE OVERVIEW,omitempty"`
TeamRoster string `json:"Team Roster List,omitempty"`
}
func LoadAirTable(c helper.ContextDetail, airTableDetail model.ItemDetail) (*AirTableList, error) {
var airTableList = new(AirTableList)
var offset = ""
var isEnd = false;
for ok := true; ok; ok = (!isEnd) {
url := "https://api.airtable.com/v0/" + airTableDetail.WebURL + "?view=QRAppView&offset=" + offset
//url := "https://api.airtable.com/v0/" + airTableDetail.WebURL + "?view=QRAppView&maxRecords=15&offset=" + offset
helper.Log(c, "info", "Loading air table", "uid", c.UID, "url", url)
client := &http.Client{
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
req, err := http.NewRequest("GET", url, nil)
if err != nil {
helper.Log(c, "error", "Error Loading air table", "airtableId", airTableDetail.ID, "error", err.Error())
err := errors.New("Air Table API Error")
return nil, err
}
req.Header.Set("Authorization", "Bearer " + airTableDetail.ExtID)
resp, err := client.Do(req)
if err != nil {
helper.Log(c, "error", "Error Loading air table", "airtableId", airTableDetail.ID, "error", err.Error())
err := errors.New("Air Table API Error")
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
helper.Log(c, "error", "Http call not successful", "airtableId", airTableDetail.ID, "response code", resp.Status)
err := errors.New("Air Table API Error")
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
helper.Log(c, "error", "Error parsing response body", "airtableId", airTableDetail.ID, "error", err.Error())
err := errors.New("Error Parsing Air Table")
return nil, err
}
var respJson = new(AirTableList)
err = json.Unmarshal(body, &respJson)
if err != nil {
helper.Log(c, "error", "Error parsing response body", "airtableId", airTableDetail.ID, "error", err.Error())
err := errors.New("Error Parsing Air Table")
return nil, err
}
airTableList.Records = append(airTableList.Records, respJson.Records...)
for i, _ := range respJson.Records {
if (len(respJson.Records[i].Fields.QRImage) == 0) {
helper.Log(c, "info", "Generating QR Code", "airtableId", airTableDetail.ID, "extId", respJson.Records[i].Id)
err := LoadQRCode(c, airTableDetail, respJson.Records[i].Id )
if err != nil {
helper.Log(c, "error", "Error Generating QR Code", "extId", respJson.Records[i].Id, "error", err.Error())
}
}
}
if respJson.Offset != "" {
offset = respJson.Offset
isEnd = false
time.Sleep(200 * time.Millisecond)
} else {
offset = ""
isEnd = true
}
}
return airTableList, nil
}
func LoadQRCode(c helper.ContextDetail, airTableDetail model.ItemDetail, itemId string) error {
url := "https://api.airtable.com/v0/" + airTableDetail.WebURL
client := &http.Client{
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
qrImage := qrImageStruct {
Url: "https://api.qrserver.com/v1/create-qr-code/?size=250x250&data=https://nts.lqd.ch/" + itemId,
FileName: "qrcode",
}
qrImageArray := []qrImageStruct{qrImage}
bodyItem := airTableRecord {
Id: itemId,
}
bodyItem.Fields.QRImage = qrImageArray
bodyItem.Fields.QRValue = itemId
var bodyJson = new(AirTableList)
bodyJson.Records = append(bodyJson.Records, bodyItem)
bytesField, _ := json.Marshal(bodyJson)
req, err := http.NewRequest("PATCH", url, bytes.NewBuffer(bytesField))
req.Header.Set("Authorization", "Bearer " + airTableDetail.ExtID)
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
helper.Log(c, "warning", "AirTable QR Update Error", "extId", itemId, "error", err.Error())
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
helper.Log(c, "warning", "AirTable QR Update Error", "extId", itemId, "response code", resp.Status)
err := errors.New("QR Code Generation error")
return err
}
return nil
} | [
3
] |
package explicit
import (
"bitbucket.org/gofd/gofd/core"
"testing"
)
func alldifferentPrimitives_test(t *testing.T, xinit []int, yinit []int,
zinit []int, qinit []int, expx []int, expy []int,
expz []int, expq []int, expready bool) {
X := core.CreateIntVarExValues("X", store, xinit)
Y := core.CreateIntVarExValues("Y", store, yinit)
Z := core.CreateIntVarExValues("Z", store, zinit)
Q := core.CreateIntVarExValues("Q", store, qinit)
store.AddPropagators(CreateAlldifferentPrimitives(X, Y, Z, Q))
ready := store.IsConsistent()
ready_test(t, "Alldifferent2", ready, expready)
if expready {
domainEquals_test(t, "Alldifferent2", X, expx)
domainEquals_test(t, "Alldifferent2", Y, expy)
domainEquals_test(t, "Alldifferent2", Z, expz)
domainEquals_test(t, "Alldifferent2", Q, expq)
}
}
func Test_AlldifferentPrimitivesa(t *testing.T) {
setup()
defer teardown()
log("AlldifferentPrimitivesa: X:0, Y:0..1, Z:1..2, Q:2..3")
alldifferentPrimitives_test(t, []int{0}, []int{0, 1}, []int{1, 2}, []int{2, 3},
[]int{0}, []int{1}, []int{2}, []int{3}, true)
}
func Test_AlldifferentPrimitivesb(t *testing.T) {
setup()
defer teardown()
log("AlldifferentPrimitivesb: X:0..1, Y:1, Z:2..3, Q:3")
alldifferentPrimitives_test(t, []int{0, 1}, []int{1}, []int{2, 3}, []int{3},
[]int{0}, []int{1}, []int{2}, []int{3}, true)
}
func Test_AlldifferentPrimitivesc(t *testing.T) {
setup()
defer teardown()
log("AlldifferentPrimitivesc: X:0, Y:1, Z:2, Q:3")
alldifferentPrimitives_test(t, []int{0}, []int{1}, []int{2}, []int{3},
[]int{0}, []int{1}, []int{2}, []int{3}, true)
}
func Test_AlldifferentPrimitivesd(t *testing.T) {
setup()
defer teardown()
log("AlldifferentPrimitivesd: X:0, Y:0, Z:0, Q:0")
alldifferentPrimitives_test(t, []int{0}, []int{0}, []int{0}, []int{0},
[]int{}, []int{}, []int{}, []int{}, false)
}
| [
6
] |
package controller
import (
"github.com/gin-gonic/gin"
"net/http"
)
// GetIndex show Hello world !!
func GetIndex(c *gin.Context) {
c.String(http.StatusOK, "Hello world !!")
}
// GetFullName get request sample
func GetFullName(c *gin.Context) {
fname := c.DefaultQuery("firstname", "Guest")
lname := c.DefaultQuery("lastname", "Last")
//lname := c.Query("lastname") // c.Request.URL.Query().Get("lastname") と同じ
c.String(http.StatusOK, "Hello %s %s !!", fname, lname)
}
// PostMessage post request sample
func PostMessage(c *gin.Context) {
message := c.PostForm("message")
name := c.DefaultPostForm("name", "Guest")
c.JSON(http.StatusOK, gin.H{
"message": message,
"name": name,
})
}
// SetCookie cookie sample
func SetCookie(c *gin.Context) {
cookie, err := c.Cookie("sample")
if err != nil {
cookie = "none"
c.SetCookie("sample", "cookieValue", 3600, "/sample/set-cookie", "localhost", false, true)
}
c.JSON(http.StatusOK, gin.H{
"value": cookie,
})
}
// BasicAuth Basic Auth sample
func BasicAuth(c *gin.Context) {
var admins = gin.H{
"admin": gin.H{"email": "[email protected]"},
"hoge": gin.H{"email": "[email protected]"},
}
// BasicAuth ミドルウェアによって設定される
user := c.MustGet(gin.AuthUserKey).(string)
if admin, ok := admins[user]; ok {
c.JSON(http.StatusOK, gin.H{"user": user, "admin": admin})
} else {
c.JSON(http.StatusOK, gin.H{"user": user, "admin": "No admin data :("})
}
}
// Html sample
func Html(c *gin.Context) {
var admins = gin.H{
"admin": gin.H{"email": "[email protected]"},
"hoge": gin.H{"email": "[email protected]"},
}
// BasicAuth ミドルウェアによって設定される
user := c.MustGet(gin.AuthUserKey).(string)
if admin, ok := admins[user]; ok {
c.JSON(http.StatusOK, gin.H{"user": user, "admin": admin})
} else {
c.JSON(http.StatusOK, gin.H{"user": user, "admin": "No admin data :("})
}
}
| [
3
] |
// Copyright (c) 2014, Markover Inc.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/poptip/ftc
package ftc
import (
"encoding/json"
"expvar"
"fmt"
"io"
"net/http"
"strings"
"time"
"code.google.com/p/go.net/websocket"
"github.com/golang/glog"
)
var numClients = expvar.NewInt("num_clients")
const (
// Protocol error codes and mappings.
errorTransportUnknown = 0
errorUnknownSID = 1
errorBadHandshakeMethod = 2
errorBadRequest = 3
// Query parameters used in client requests.
paramTransport = "transport"
paramSessionID = "sid"
// Available transports.
transportWebSocket = "websocket"
transportPolling = "polling"
// The default time before closed connections are cleaned from
// the client pool.
clientReapTimeout = 5 * time.Second
)
var errorMessage = map[int]string{
errorTransportUnknown: "Transport unknown",
errorUnknownSID: "Session ID unknown",
errorBadHandshakeMethod: "Bad handshake method",
errorBadRequest: "Bad request",
}
var (
validTransports = map[string]bool{
transportWebSocket: true,
transportPolling: true,
}
validUpgrades = map[string]bool{
transportWebSocket: true,
}
)
// getValidUpgrades returns a slice containing the valid protocols
// that a connection can upgrade to.
func getValidUpgrades() []string {
upgrades := make([]string, len(validUpgrades))
i := 0
for u := range validUpgrades {
upgrades[i] = u
i++
}
return upgrades
}
// A Handler is called by the server when a connection is
// opened successfully.
type Handler func(*Conn)
type server struct {
// Handler handles an FTC connection.
Handler
basePath string
cookieName string
clients *clientSet // The set of connections (some may be closed).
wsServer *websocket.Server // The underlying WebSocket server.
}
// The defaults for options passed to the server.
const (
defaultBasePath = "/engine.io/"
defaultCookieName = "io"
)
// Options are the parameters passed to the server.
type Options struct {
// BasePath is the base URL path that the server handles requests for.
BasePath string
// CookieName is the name of the cookie set upon successful handshake.
CookieName string
}
// NewServer allocates and returns a new server with the given
// options and handler. If nil options are passed, the defaults
// specified in the constants above are used instead.
func NewServer(o *Options, h Handler) *server {
opts := Options{}
if o != nil {
opts = *o
}
if len(opts.BasePath) == 0 {
opts.BasePath = defaultBasePath
}
if len(opts.CookieName) == 0 {
opts.CookieName = defaultCookieName
}
s := &server{
Handler: h,
basePath: opts.BasePath,
cookieName: opts.CookieName,
clients: &clientSet{clients: map[string]*conn{}},
}
go s.startReaper()
s.wsServer = &websocket.Server{Handler: s.wsHandler}
return s
}
// startReaper continuously removes closed connections from the
// client set via the reap function.
func (s *server) startReaper() {
for {
if s.clients == nil {
glog.Fatal("server cannot have a nil client set")
}
s.clients.reap()
numClients.Set(int64(s.clients.len()))
time.Sleep(clientReapTimeout)
}
}
// handlePacket takes the given packet and writes the appropriate
// response to the given connection.
func (s *server) handlePacket(p packet, c *conn) error {
glog.Infof("handling packet type: %c, data: %s, upgraded: %t", p.typ, p.data, c.upgraded())
var encode func(packet) error
if c.upgraded() {
encode = newPacketEncoder(c).encode
} else {
encode = func(pkt packet) error {
return newPayloadEncoder(c).encode([]packet{pkt})
}
}
switch p.typ {
case packetTypePing:
return encode(packet{typ: packetTypePong, data: p.data})
case packetTypeMessage:
if c.pubConn != nil {
c.pubConn.onMessage(p.data)
}
case packetTypeClose:
c.Close()
}
return nil
}
// wsHandler continuously receives on the given WebSocket
// connection and delegates the packets received to the
// appropriate handler functions.
func (s *server) wsHandler(ws *websocket.Conn) {
// If the client initially attempts to connect directly using
// WebSocket transport, the session ID parameter will be empty.
// Otherwise, the connection with the given session ID will
// need to be upgraded.
glog.Infoln("Starting websocket handler...")
var c *conn
wsEncoder, wsDecoder := newPacketEncoder(ws), newPacketDecoder(ws)
for {
if c != nil {
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
break
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypeUpgrade {
// Upgrade the connection to use this WebSocket Conn.
c.upgrade(ws)
continue
}
if err := s.handlePacket(pkt, c); err != nil {
glog.Errorf("could not handle packet: %v", err)
break
}
continue
}
id := ws.Request().FormValue(paramSessionID)
c = s.clients.get(id)
if len(id) > 0 && c == nil {
serverError(ws, errorUnknownSID)
break
} else if len(id) > 0 && c != nil {
// The initial handshake requires a ping (2) and pong (3) echo.
var pkt packet
if err := wsDecoder.decode(&pkt); err != nil {
glog.Errorf("could not decode packet: %v", err)
continue
}
glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data)
if pkt.typ == packetTypePing {
glog.Infof("got ping packet with data %s", pkt.data)
if err := wsEncoder.encode(packet{typ: packetTypePong, data: pkt.data}); err != nil {
glog.Errorf("could not encode pong packet: %v", err)
continue
}
// Force a polling cycle to ensure a fast upgrade.
glog.Infoln("forcing polling cycle")
payload := []packet{packet{typ: packetTypeNoop}}
if err := newPayloadEncoder(c).encode(payload); err != nil {
glog.Errorf("could not encode packet to force polling cycle: %v", err)
continue
}
}
} else if len(id) == 0 && c == nil {
// Create a new connection with this WebSocket Conn.
c = newConn()
c.ws = ws
s.clients.add(c)
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
if err := wsEncoder.encode(packet{typ: packetTypeOpen, data: b}); err != nil {
glog.Errorf("could not encode open packet: %v", err)
break
}
if s.Handler != nil {
go s.Handler(c.pubConn)
}
}
}
glog.Infof("closing websocket connection %p", ws)
c.Close()
}
// pollingHandler handles all XHR polling requests to the server, initiating
// a handshake if the request’s session ID does not already exist within
// the client set.
func (s *server) pollingHandler(w http.ResponseWriter, r *http.Request) {
setPollingHeaders(w, r)
id := r.FormValue(paramSessionID)
if len(id) > 0 {
c := s.clients.get(id)
if c == nil {
serverError(w, errorUnknownSID)
return
}
if r.Method == "POST" {
var payload []packet
if err := newPayloadDecoder(r.Body).decode(&payload); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer r.Body.Close()
for _, pkt := range payload {
s.handlePacket(pkt, c)
}
fmt.Fprintf(w, "ok")
return
} else if r.Method == "GET" {
glog.Infoln("GET request xhr polling data...")
// TODO(andybons): Requests can pile up, here. Drain the conn and
// then write the payload.
if _, err := io.Copy(w, c); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
s.pollingHandshake(w, r)
}
// pollingHandshake creates a new FTC Conn with the given HTTP Request and
// ResponseWriter, setting a persistence cookie if necessary and calling
// the server’s Handler.
func (s *server) pollingHandshake(w http.ResponseWriter, r *http.Request) {
c := newConn()
s.clients.add(c)
if len(s.cookieName) > 0 {
http.SetCookie(w, &http.Cookie{
Name: s.cookieName,
Value: c.id,
})
}
b, err := handshakeData(c)
if err != nil {
glog.Errorf("could not get handshake data: %v", err)
}
payload := []packet{packet{typ: packetTypeOpen, data: b}}
if err := newPayloadEncoder(w).encode(payload); err != nil {
glog.Errorf("could not encode open payload: %v", err)
return
}
if s.Handler != nil {
go s.Handler(c.pubConn)
}
}
// ServeHTTP implements the http.Handler interface for an FTC Server.
func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
remoteAddr := r.Header.Get("X-Forwarded-For")
if len(remoteAddr) == 0 {
remoteAddr = r.RemoteAddr
}
glog.Infof("%s (%s) %s %s %s", r.Proto, r.Header.Get("X-Forwarded-Proto"), r.Method, remoteAddr, r.URL)
transport := r.FormValue(paramTransport)
if strings.HasPrefix(r.URL.Path, s.basePath) && !validTransports[transport] {
serverError(w, errorTransportUnknown)
return
}
if transport == transportWebSocket {
s.wsServer.ServeHTTP(w, r)
} else if transport == transportPolling {
s.pollingHandler(w, r)
}
}
// handshakeData returns the JSON encoded data needed
// for the initial connection handshake.
func handshakeData(c *conn) ([]byte, error) {
return json.Marshal(map[string]interface{}{
"pingInterval": 25000,
"pingTimeout": 60000,
"upgrades": getValidUpgrades(),
"sid": c.id,
})
}
// serverError sends a JSON-encoded message to the given io.Writer
// with the given error code.
func serverError(w io.Writer, code int) {
if rw, ok := w.(http.ResponseWriter); ok {
rw.Header().Set("Content-Type", "application/json")
rw.WriteHeader(http.StatusBadRequest)
}
msg := struct {
Code int `json:"code"`
Message string `json:"message"`
}{
Code: code,
Message: errorMessage[code],
}
if err := json.NewEncoder(w).Encode(msg); err != nil {
glog.Errorln("error encoding error msg %+v: %s", msg, err)
return
}
glog.Errorf("wrote server error: %+v", msg)
}
// setPollingHeaders sets the appropriate headers when responding
// to an XHR polling request.
func setPollingHeaders(w http.ResponseWriter, r *http.Request) {
origin := r.Header.Get("Origin")
if len(origin) > 0 {
w.Header().Set("Access-Control-Allow-Credentials", "true")
} else {
origin = "*"
}
w.Header().Set("Access-Control-Allow-Origin", origin)
w.Header().Set("Connection", "keep-alive")
w.Header().Set("Content-Type", "text/plain; charset=UTF-8")
}
| [
5
] |
package structs
type UserLoginParams struct {
UserName string `valid:"Required;MaxSize(20)" form:"user_name"`
Password string `valid:"Required;MinSize(6);MaxSize(16)" form:"password"`
} //用户登录参数 | [
3
] |
package main
import (
"fmt"
"io/ioutil"
"math"
"os"
"strings"
)
const TOTALROWS = 128
const TOTALCOLUMNS = 8
func main() {
f, _ := os.Open("day5_input.txt")
b, _ := ioutil.ReadAll(f)
input_string := string(b)
lines := strings.Split(input_string, "\n")
lines = lines[0 : len(lines)-1]
var seats [][]int = make([][]int, TOTALROWS)
for s := range seats {
column := make([]int, TOTALCOLUMNS)
seats[s] = column
}
for _, line := range lines {
row := binarySearch(line[:7], TOTALROWS-1) // subtract 1 because indexed by 0
column := binarySearch(line[7:], TOTALCOLUMNS-1) // subtract 1 because indexed by 0
seats[row][column] = -1
}
for i, columns := range seats {
for j := range columns {
if seats[i][j] == 0 && i > 5 && i < 124 { // make sure not at the "very front or back" of plane
fmt.Println(i*8 + j)
}
}
}
}
func binarySearch(in string, upperBound int) int {
lower := 0
upper := upperBound
for _, c := range in {
diff := upper - lower // difference between high and low
switch string(c) {
case "F", "L":
upper = upper - int(math.Ceil(float64(diff)/2.0))
case "B", "R":
lower = lower + int(math.Ceil(float64(diff)/2.0))
}
}
if len(in) > 3 {
return lower
}
return upper
}
func makeRange(min, max int) []int {
a := make([]int, max-min+1)
for i := range a {
a[i] = min + i
}
return a
}
| [
0,
1
] |
package time_series
import (
"fmt"
common "github.com/lukaszozimek/alpha-vantage-api-client"
)
const (
ONE_MINUTE = "1min"
FIVE_MINUTE = "5min"
FIFITHTEEN_MINUTE = "15min"
THIRTY_MINUTE = "30min"
SIXTY_MINUTE = "60min"
)
func TimeSeriesIntraDayInterval1minute(symbol string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse {
return timeSeriesIntraDay(symbol, ONE_MINUTE, apiKey, c)
}
func TimeSeriesIntraDayInterval5minute(symbol string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse {
return timeSeriesIntraDay(symbol, FIVE_MINUTE, apiKey, c)
}
func TimeSeriesIntraDayIntervalFifteenMinute(symbol string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse {
return timeSeriesIntraDay(symbol, FIFITHTEEN_MINUTE, apiKey, c)
}
func TimeSeriesIntraDayIntervalThirtyMinute(symbol string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse {
return timeSeriesIntraDay(symbol, THIRTY_MINUTE, apiKey, c)
}
func TimeSeriesIntraDayIntervalSixtyMinute(symbol string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse {
return timeSeriesIntraDay(symbol, SIXTY_MINUTE, apiKey, c)
}
func timeSeriesIntraDay(symbol string, interval string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse {
return makeApiCallGet(fmt.Sprintf(c.BaseURL.String()+"/query?function=TIME_SERIES_INTRADAY&symbol=%v&interval=%v&apikey=%v", symbol, interval, apiKey), c)
}
| [
6
] |
package iplookup
import (
"strings"
"github.com/garyburd/redigo/redis"
"../db"
"fmt"
)
type IpInfo struct {
ID string //id编号
IP string //ip段
StartIP string //开始IP
EndIP string //结束IP
Country string //国家
Province string //省
City string //市
District string //区
Isp string //运营商
Type string //类型
Desc string //说明
}
func FindIpInfo(id string) (ipInfo IpInfo, err error) {
v1, e := redis.String(db.Cli().Do("HGET", "ip_info", id))
if e != nil {
return ipInfo, fmt.Errorf("find ip info err. redis: id:", id)
}
str := strings.Trim(v1, "\n")
strArr := strings.Split(str, ",")
ipInfo.ID = strArr[0]
ipInfo.IP = strArr[1]
ipInfo.StartIP = strArr[2]
ipInfo.EndIP = strArr[3]
ipInfo.Country = strArr[4]
ipInfo.Province = strArr[5]
ipInfo.City = strArr[6]
ipInfo.District = strArr[7]
ipInfo.Isp = strArr[8]
ipInfo.Type = strArr[9]
ipInfo.Desc = strArr[10]
return ipInfo, nil
} | [
3
] |
package main
import (
"fmt"
"math/rand"
"os"
//"text/tabwriter"
"strconv"
"time"
"sort"
)
/*
func myQuicksort (list []int) []int {
if len(list) <= 1 {
return list
}
}
func findPivot(list []int) int {
listLen = len(list)
if listLen < 3 {
return list[0]
}
first := list[0]
middle := list[listLen/2]
last := list[listLen]
if first > middle && first < last {
return first
}
if middle > first && middle < last {
return middle
}
if last > first && last <
}
*/
type ByNumb []int
func (a ByNumb) Len() int {
return len(a)
}
func (a ByNumb) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func (a ByNumb) Less(i, j int) bool {
return a[i] < a[j]
}
func main() {
count, _ := strconv.Atoi(os.Args[1])
r := rand.New(rand.NewSource(time.Now().UnixNano()))
list := make([]int, count)
for i := 0; i < count; i++ {
list[i] = r.Intn(100)
}
sort.Sort(ByNumb(list))
fmt.Println(list)
}
| [
3
] |
// Package logrus_pgx provides ability to use Logrus with PGX
package logrus_pgx
import (
"github.com/sirupsen/logrus"
)
// pgxLogger type, used to extend standard logrus logger.
type PgxLogger logrus.Logger
// pgxEntry type, used to extend standard logrus entry.
type PgxEntry logrus.Entry
//Print and format debug message using logrus.
func (w *PgxLogger) Debug(msg string, vars ...interface{}) {
f := logrus.Fields{}
for i := 0; i < len(vars)/2; i++ {
f[vars[i*2].(string)] = vars[i*2+1]
}
(*logrus.Logger)(w).WithFields(f).Debug(msg)
}
//Print and format error message using logrus.
func (w *PgxLogger) Error(msg string, vars ...interface{}) {
f := logrus.Fields{}
for i := 0; i < len(vars)/2; i++ {
f[vars[i*2].(string)] = vars[i*2+1]
}
(*logrus.Logger)(w).WithFields(f).Error(msg)
}
//Print and format info message using logrus.
func (w *PgxLogger) Info(msg string, vars ...interface{}) {
f := logrus.Fields{}
for i := 0; i < len(vars)/2; i++ {
f[vars[i*2].(string)] = vars[i*2+1]
}
(*logrus.Logger)(w).WithFields(f).Info(msg)
}
//Print and format warning message using logrus.
func (w *PgxLogger) Warn(msg string, vars ...interface{}) {
f := logrus.Fields{}
for i := 0; i < len(vars)/2; i++ {
f[vars[i*2].(string)] = vars[i*2+1]
}
(*logrus.Logger)(w).WithFields(f).Warn(msg)
}
//Print and format debug message using logrus.
func (w *PgxEntry) Debug(msg string, vars ...interface{}) {
f := logrus.Fields{}
for i := 0; i < len(vars)/2; i++ {
f[vars[i*2].(string)] = vars[i*2+1]
}
(*logrus.Entry)(w).WithFields(f).Debug(msg)
}
//Print and format error message using logrus.
func (w *PgxEntry) Error(msg string, vars ...interface{}) {
f := logrus.Fields{}
for i := 0; i < len(vars)/2; i++ {
f[vars[i*2].(string)] = vars[i*2+1]
}
(*logrus.Entry)(w).WithFields(f).Error(msg)
}
//Print and format info message using logrus.
func (w *PgxEntry) Info(msg string, vars ...interface{}) {
f := logrus.Fields{}
for i := 0; i < len(vars)/2; i++ {
f[vars[i*2].(string)] = vars[i*2+1]
}
(*logrus.Entry)(w).WithFields(f).Info(msg)
}
//Print and format warning message using logrus.
func (w *PgxEntry) Warn(msg string, vars ...interface{}) {
f := logrus.Fields{}
for i := 0; i < len(vars)/2; i++ {
f[vars[i*2].(string)] = vars[i*2+1]
}
(*logrus.Entry)(w).WithFields(f).Warn(msg)
}
| [
3
] |
package service
import (
"gin-vue-admin/global"
"gin-vue-admin/model"
)
func CreateMessage(message model.Message) (err error) {
//global.GVA_DB.AutoMigrate(&message)
err = global.GVA_DB.Create(&message).Error
return err
} | [
3
] |
package url_test
import (
"fmt"
"testing"
"github.com/barolab/candidate/url"
)
type WithoutQueryTestCase struct {
argument string
expected string
err error
}
func TestWithoutQuery(T *testing.T) {
cases := []WithoutQueryTestCase{
{argument: "", expected: "", err: fmt.Errorf("Cannot exclude URL query from an empty URL")},
{argument: "https://twitter.com/candidate", expected: "https://twitter.com/candidate", err: nil},
{argument: "https://twitter.com/candidate?this=that", expected: "https://twitter.com/candidate", err: nil},
}
for _, c := range cases {
res, err := url.WithoutQuery(c.argument)
if res != c.expected {
T.Errorf("WithoutQuery should have return %s for url %s, got %s", c.expected, c.argument, res)
}
if err == nil && c.err != nil {
T.Errorf("WithoutQuery returned no error but we expected to return %s (for url %s)", c.err, c.argument)
}
if err != nil && c.err == nil {
T.Errorf("WithoutQuery returned an error %s that was not expected (for url %s)", err, c.argument)
}
if err != nil && c.err != nil && err.Error() != c.err.Error() {
T.Errorf("WithoutQuery should have returned an error %s, but we got %s (for url %s)", c.err, err, c.argument)
}
}
}
| [
2
] |
package main
import "fmt"
func main() {
var numbers []int
printSlice(numbers)
//允许追加空切片
numbers = append(numbers,0)
printSlice(numbers)
//向空切片添加一个元素
numbers = append(numbers,1)
printSlice(numbers)
//同时添加多个元素
numbers = append(numbers,2,3,4)
printSlice(numbers)
//创建切片 number1 是之前 切片容量的两倍,容量的值只有1,2,4,6,8
numbers1:= make([]int,len(numbers),(cap(numbers))*2)
//拷贝 number 的内容到number1
copy(numbers1,numbers)
printSlice(numbers1)
}
func printSlice (x []int){
fmt.Printf("len=%d cap=%d slice=%v\n",len(x),cap(x),x)
} | [
3
] |
package gragh
func dijkstraMatrix(g *matrix, src int) (dist []int, sptSet []int) {
sptSet = make([]int, g.n)
dist = make([]int, g.n)
pred := make([]int, g.n)
for i := range dist {
dist[i] = INF
sptSet[i] = -1
pred[i] = -1
}
dist[src] = 0
pred[src] = src
for i := 0; i < g.n; i++ {
mindist := INF
minvert := src
// find shortest distance vertex that not in spt set
for j := 0; j < g.n; j++ {
if sptSet[j] == -1 && dist[j] < mindist {
mindist = dist[j]
minvert = j
}
}
if minvert == INF {
break // the remaining vertex are unreachable from src, thus we can break here
}
// update shortest distance
for j := 0; j < g.n; j++ {
curdist := g.get(minvert, j) // this also works from directed graph
if curdist > 0 && dist[minvert]+curdist < dist[j] {
dist[j] = dist[minvert] + curdist
pred[j] = minvert
}
}
sptSet[minvert] = pred[minvert]
}
return
}
func dijkstraAdjacent(g *graph, src int) (mindist []int, sptSet []int) {
sptSet = make([]int, g.n)
indices := make([]int, g.n)
mindist = make([]int, g.n)
pred := make([]int, g.n)
position := make([]int, g.n)
for i := 0; i < g.n; i++ {
sptSet[i] = -1
indices[i] = i
position[i] = i
mindist[i] = INF
}
h := &heap{indices, pred, mindist, position}
h.mindist[src] = 0 // minimum distance for source vertex is 0
h.minfrom[src] = src // source vertex's pred is itself
h.siftUp(src) // src's value is least, sift up to stack top
for num := 0; num < g.n; num++ {
i := h.pop() // pop vertex with minimum distance with mst set
if mindist[i] == INF {
break
}
for nb := g.adjacency[i]; nb != nil; nb = nb.next {
j := nb.id // update shortest distance between src and j via i
if h.mindist[i]+nb.weight < h.mindist[j] {
h.mindist[j] = h.mindist[i] + nb.weight
h.minfrom[j] = i
if !h.siftDown(h.position[j]) { // need to sift after modification
h.siftUp(h.position[j])
}
}
}
// set mst set
sptSet[i] = h.minfrom[i]
}
return
}
func floydWarshall(g *matrix) [][]int {
// this solution has not provide path information
// however it can be achieved by using another 2D array to store the predecessor.
dist := make([][]int, g.n)
for i := range dist {
dist[i] = make([]int, g.n)
for j := range dist[i] {
// initialize the distance matrix
if i == j {
dist[i][j] = 0 // dist[i][i]=0
} else if g.get(i, j) == 0 {
dist[i][j] = INF // dist[i][j]=INF if i, j is not directed linked
} else {
dist[i][j] = g.get(i, j) // real distance
}
}
}
// floyd-warshall algorithm
for k := 0; k < g.n; k++ { // k represent the intermediate vertex, outermost loop
for i := 0; i < g.n; i++ {
for j := 0; j < g.n; j++ {
// dist[i][k] and dist[k][j] should not be INF to avoid overflow
if dist[i][k] != INF && dist[k][j] != INF && dist[i][k]+dist[k][j] < dist[i][j] {
dist[i][j] = dist[i][k] + dist[k][j] // this also works from directed graph
}
}
}
}
return dist
}
| [
5,
6
] |
package install
import (
"fmt"
"io"
"net/http"
"os"
"os/exec"
)
func PathExists(path, fileName string) bool {
filePath := path + fileName
_, err := os.Stat(filePath)
if err == nil {
return true
}
if os.IsNotExist(err) {
return false
}
return false
}
func Download(FileName string, FilePath string) {
var url = FileUrl + FileName
res, err := http.Get(url)
if err != nil {
panic(err)
}
defer res.Body.Close()
f, err := os.Create(FilePath + FileName)
if err != nil {
panic(err)
}
defer f.Close()
io.Copy(f, res.Body)
}
func FileTar(Filename, FilePath, FileNewName string) {
shellcmd := "tar xf " + Filename +".tar.gz -C " + FilePath
cmd := exec.Command("/bin/bash", "-c", shellcmd)
_, err := cmd.Output()
if err != nil {
fmt.Println(err, "")
}
shellcmdmv := "mv " + Filename + " /usr/local/" + FileNewName
cmdmv := exec.Command("/bin/bash", "-c", shellcmdmv)
_, err = cmdmv.Output()
if err != nil {
fmt.Println(err)
}
} | [
2,
6
] |
/* SPDX-License-Identifier: MIT
*
* Copyright (C) 2019 WireGuard LLC. All Rights Reserved.
*/
package guid
import (
"fmt"
"syscall"
"golang.org/x/sys/windows"
)
//sys clsidFromString(lpsz *uint16, pclsid *windows.GUID) (hr int32) = ole32.CLSIDFromString
//
// FromString parses "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" string to GUID.
//
func FromString(str string) (*windows.GUID, error) {
strUTF16, err := syscall.UTF16PtrFromString(str)
if err != nil {
return nil, err
}
guid := &windows.GUID{}
hr := clsidFromString(strUTF16, guid)
if hr < 0 {
return nil, syscall.Errno(hr)
}
return guid, nil
}
//
// ToString function converts GUID to string
// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}".
//
// The resulting string is uppercase.
//
func ToString(guid *windows.GUID) string {
return fmt.Sprintf("{%06X-%04X-%04X-%04X-%012X}", guid.Data1, guid.Data2, guid.Data3, guid.Data4[:2], guid.Data4[2:])
}
| [
3
] |
package stats
import (
"github.com/fm2901/bank/v2/pkg/types"
)
func Avg(payments []types.Payment) types.Money {
var allSum types.Money
var allCount types.Money
if len(payments) < 1 {
return 0
}
for _, payment := range payments {
if payment.Status == types.StatusFail {
continue
}
allSum += payment.Amount
allCount += 1
}
return allSum / allCount
}
func TotalInCategory(payments []types.Payment, category types.Category) types.Money {
var sumInCategory types.Money
if len(payments) < 1 {
return 0
}
for _, payment := range payments {
if payment.Category != category || payment.Status == types.StatusFail {
continue
}
sumInCategory += payment.Amount
}
return sumInCategory
}
func CategoriesAvg(payments []types.Payment) map[types.Category]types.Money {
categories := map[types.Category]types.Money{}
counter := map[types.Category]int{}
for _, payment := range payments {
if payment.Amount > 0 {
categories[payment.Category] += payment.Amount
counter[payment.Category] += 1
}
}
for cat := range categories {
categories[cat] = categories[cat] / types.Money(counter[cat])
}
return categories
}
func PeriodsDynamic(
first map[types.Category]types.Money,
second map[types.Category]types.Money,
) map[types.Category]types.Money {
result := map[types.Category]types.Money{}
for key := range second {
result[key] += second[key]
}
for key := range first {
result[key] -= first[key]
}
return result
}
| [
0,
6
] |
//Priority Queue in Golang
/*
In the Push and Pop method we are using interface
Learn these :
interface{} is the empty interface type
[]interface{} is a slice of type empty interface
interface{}{} is an empty interface type composite literal
[]interface{}{} is a slice of type empty interface composite literals
What does interface{} meaning in Push and Pop operations ??
interface{} means you can put value of any type, including your own custom type. All types in Go satisfy an empty interface (interface{} is an empty interface).
In your example, Msg field can have value of any type.
Example:
package main
import (
"fmt"
)
type Body struct {
Msg interface{}
}
func main() {
b := Body{}
b.Msg = "5"
fmt.Printf("%#v %T \n", b.Msg, b.Msg) // Output: "5" string
b.Msg = 5
fmt.Printf("%#v %T", b.Msg, b.Msg) //Output: 5 int
}
*/
package main
import (
"container/heap"
"fmt"
)
type Item struct {
Name string
Expiry int
Price int
Index int
}
type PriorityQueue []*Item
//In order to sort the priority queue , implement the
/* type Interface interface {
// Len is the number of elements in the collection.
Len() int
// Less reports whether the element with
// index i should sort before the element with index j.
Less(i, j int) bool
// Swap swaps the elements with indexes i and j.
Swap(i, j int)
*/
func (pq PriorityQueue) Len() int {
return len(pq)
}
func (pq PriorityQueue) Less(i, j int) bool {
fmt.Println("pq[i].Name pq[j].Name", pq[i].Name, pq[j].Name)
fmt.Println("pq[i].Expiry pq[j].Expiry", pq[i].Expiry, pq[j].Expiry)
if pq[i].Expiry < pq[j].Expiry {
return true
} else if pq[i].Expiry == pq[j].Expiry {
return pq[i].Price > pq[j].Price
}
return false
}
func (pq PriorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
}
func (pq *PriorityQueue) Pop() interface{} {
old := *pq
n := len(old)
item := old[n-1]
*pq = old[0 : n-1]
return item
}
func (pq *PriorityQueue) Push(x interface{}) {
//n := len(*pq)
item := x.(*Item)
*pq = append(*pq, item)
}
func main() {
listItems := []*Item{
{Name: "Spinach", Expiry: 5, Price: 20},
}
/*
{Name: "Carrot", Expiry: 30, Price: 120},
{Name: "Potato", Expiry: 30, Price: 45},
{Name: "Rice", Expiry: 100, Price: 50},
*/
priorityQueue := make(PriorityQueue, len(listItems))
for i, item := range listItems {
priorityQueue[i] = item
}
/*
* Here couple of things need to be considered :
* heap works on pointers , for example: Both heap push and pop works on pointers
* See the signature.
* We should know how to work on interface. For example heap pop returns an interface
* It should be converted to corresponding type of object.
* heap.Pop(&priorityQueue).(*Item) , here Item is a pointer type.
* Because while inserting into priorityQueue, we are inserting a pointer of Item type.
*
* The defination of Push and Pop operation should remain same, only logic should be added
* in Less() method as per the requirement.
*
* Unlike python we need to handle equal cases in Less() operation.
*/
heap.Init(&priorityQueue)
heap.Push(&priorityQueue, &Item{Name: "Potato", Expiry: 30, Price: 45})
heap.Push(&priorityQueue, &Item{Name: "Carrot", Expiry: 30, Price: 120})
item := heap.Pop(&priorityQueue).(*Item)
fmt.Printf("Name %s Expiry:%d\n", item.Name, item.Expiry)
for priorityQueue.Len() > 0 {
item = heap.Pop(&priorityQueue).(*Item)
fmt.Printf("Name %s Expiry:%d\n", item.Name, item.Expiry)
}
}
| [
3
] |
package mocks
import (
"app/models"
"reflect"
"testing"
"time"
)
func TestUserMock(t *testing.T) {
ID := 0
users := &UserMock{}
user := &models.User{
ID: ID,
Name: "test user",
Email: "[email protected]",
Icon: "testicon",
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
users.CreateUser(user)
got, err := users.GetUser(ID)
if err != nil {
t.Fatalf("An error occurred: %v", err)
}
if !reflect.DeepEqual(*user, *got) {
t.Fatalf("Not equal user")
}
}
func TestUserMockRepository(t *testing.T) {
users := NewUserRepository()
ID := 0
user := &models.User{
ID: ID,
Name: "test user",
Email: "[email protected]",
Icon: "testicon",
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
users.CreateUser(user)
got, err := users.GetUser(ID)
if err != nil {
t.Fatalf("An error occurred: %v", err)
}
if !reflect.DeepEqual(*user, *got) {
t.Fatalf("Not equal user")
}
}
func TestUser(t *testing.T) {
users := NewUserRepository()
ID := 0
Email := "[email protected]"
now := time.Now()
user := &models.User{
ID: ID,
Name: "test user",
Email: Email,
Icon: "testicon",
CreatedAt: now,
UpdatedAt: now,
}
users.CreateUser(user)
Email2 := "[email protected]"
user2 := &models.User{
ID: ID,
Name: "test2 user",
Email: Email2,
Icon: "test2icon",
CreatedAt: now,
UpdatedAt: time.Now(),
}
users.UpdateUser(user2)
u, err := users.GetUser(ID)
if err != nil {
t.Fatalf("An error occurred: %v\n", err)
}
users.UpdateUser(user2)
if testCompareUsers(t, user, u) {
t.Fatalf("User did not update")
}
if !testCompareUsers(t, user2, u) {
t.Fatalf("User did not update")
}
u, err = users.GetUserByEmail(Email)
if err == nil {
t.Fatalf("Invalid email, but got user")
}
u, err = users.GetUserByEmail(Email2)
if err != nil {
t.Fatalf("Valid email, but got error")
}
if !testCompareUsers(t, user2, u) {
t.Fatalf("Users don't match")
}
users.DeleteUser(ID)
u, err = users.GetUser(ID)
if err == nil {
t.Fatalf("An error occurred: %v\n", err)
}
if u != nil {
t.Fatalf("User did not delete")
}
}
func testCompareUsers(t *testing.T, user *models.User, user2 *models.User) bool {
if user.ID != user2.ID {
return false
}
if user.Name != user2.Name {
return false
}
if user.Email != user2.Email {
return false
}
if user.HashedPassword != user2.HashedPassword {
return false
}
if user.Icon != user2.Icon {
return false
}
return true
}
func TestUserError(t *testing.T) {
user := &models.User{
ID: 1,
Name: "test name",
Email: "[email protected]",
Icon: "test icon",
}
users := NewUserRepository()
err := users.UpdateUser(user)
if err == nil {
t.Fatalf("An error should occur")
}
err = users.DeleteUser(user.ID)
if err == nil {
t.Fatalf("An error should occur")
}
}
| [
6
] |
package main
import (
"fmt"
"math/rand"
"sort"
"time"
)
func main() {
rand.Seed(time.Now().UnixNano())
numPrisoners := 1000
numHats := 1001
// there are 1000 prisoners
prisoners := make([]int, numPrisoners)
// and 1001 hats
hats := make([]int, numHats)
for i := 0; i < numHats; i++ {
hats[i] = i + 1
}
// the hats are randomly sorted and one is removed
rand.Shuffle(len(hats), func(i, j int) { hats[i], hats[j] = hats[j], hats[i] })
hats = hats[:len(hats)-1]
// the prisoners are all assigned hats
for i := range prisoners {
prisoners[i] = hats[i]
}
correctlyGuessedBefore := []int{}
discardedThroughStrategy := []int{}
priorGuess := 0
prisonersSurvived := 0
prisonersDied := 0
// the prisoners take turns, from left to right, to make a choice
for i := range prisoners {
// we can look in front and see what we're missing from a set of 1001
missing := make(map[int]struct{}, 1000)
for j := 0; j < numHats; j++ {
missing[j+1] = struct{}{}
}
for j := i + 1; j < len(prisoners); j++ {
delete(missing, prisoners[j])
}
// remove prior guesses we've remembered
for _, j := range correctlyGuessedBefore {
delete(missing, j)
}
for _, j := range discardedThroughStrategy {
delete(missing, j)
}
// we should be left with some number of options. I need to choose in
// such a way that informs the person in front of what to choose
missingSlice := make([]int, 0, len(missing))
for j := range missing {
missingSlice = append(missingSlice, j)
}
sort.Ints(missingSlice)
guess := 0
if i == 0 {
// first prisoner does something a little different, they tell i + 1
// their number which eliminates the remaining choice
guess = prisoners[i+1]
} else if i == 1 {
// the second prisoner follows this strategy and can eliminate
// everything
guess = priorGuess
// we can just discard all
discardedThroughStrategy = append(discardedThroughStrategy, missingSlice...)
} else {
guess = missingSlice[len(missingSlice)-1]
}
priorGuess = guess
// take a guess at the minimum
if guess == prisoners[i] {
correctlyGuessedBefore = append(correctlyGuessedBefore, guess)
prisonersSurvived++
} else {
prisonersDied++
}
}
fmt.Println(prisonersSurvived, prisonersDied)
}
| [
5
] |
package db
import (
"database/sql"
"fmt"
"time"
"gopkg.in/vmihailenco/msgpack.v2"
)
func (db DB) EnsureQueuesExist(queueNames []string) error {
for _, queueName := range queueNames {
if err := db.FirstOrCreate(&Queue{}, Queue{Name: queueName}).Error; err != nil {
return fmt.Errorf("couldn't create queue %s: %v", queueName, err)
}
}
return nil
}
func (db DB) PopJobFrom(queueNames []string, processID uint) (*Job, error) {
var id int
var job Job
tx := db.Begin()
err := tx.Raw(`
SELECT id FROM jobs
WHERE queue_name IN (?) AND state = ? AND start_at <= ?
ORDER BY enqueued_at ASC LIMIT 1 FOR UPDATE`, queueNames, JobEnqueued, time.Now()).Row().Scan(&id)
if err != nil && err != sql.ErrNoRows {
tx.Rollback()
return nil, err
} else if err == sql.ErrNoRows {
tx.Rollback()
return nil, nil
}
err = tx.Model(&job).Where("id = ?", id).Update(Job{State: JobRunning, ProcessID: &processID}).Error
if err != nil {
tx.Rollback()
return nil, err
}
if err = tx.Commit().Error; err != nil {
tx.Rollback()
return nil, err
}
if err = db.First(&job, id).Error; err != nil {
return nil, err
}
if err = msgpack.Unmarshal(job.ParamBlob, &job.Params); err != nil {
// TODO(as3richa) - record this failure
return nil, err
}
return &Job, nil
}
func (db DB) PushJob(job *Job) error {
return db.Create(job).Error
}
func (db DB) FinishJob(job *Job) error {
return db.Model(job).Where("id = ?", job.ID).Update("state", JobFinished).Error
}
func (db DB) FailJob(job *Job) error {
return db.Model(job).Where("id = ?", job.ID).Update("state", JobFailed).Error
}
func (db DB) BuildJob(
queueName string,
jobName string,
params []interface{},
startAfter time.Time,
retryCount uint,
) (*Job, error) {
serializedParams, err := msgpack.Marshal(params)
if err != nil {
return nil, fmt.Errorf("couldn't marshal parameters: %v", err)
}
return &Job{
QueueName: queueName,
Name: jobName,
ParamBlob: serializedParams,
StartAfter: startAfter,
}, nil
}
| [
6
] |
package main
import (
"fmt"
)
func main() {
//Program to print number in decimal, binary, hex
x := 10
fmt.Printf("%d,%b,%#x", x, x, x)
}
| [
3
] |
package pd
import (
"strings"
"time"
"github.com/juju/errors"
"github.com/zssky/log"
"github.com/taorenhai/ancestor/client"
"github.com/taorenhai/ancestor/meta"
)
const (
maxRetryInterval = time.Minute
minReplica = 3
)
type delayRecord struct {
timeout time.Time
interval time.Duration
}
func newDelayRecord() *delayRecord {
return &delayRecord{interval: time.Second, timeout: time.Now().Add(time.Second)}
}
func (d *delayRecord) valid() bool {
if time.Since(d.timeout) > 0 {
return false
}
return true
}
func (d *delayRecord) next() {
d.interval = d.interval * 2
if d.interval > maxRetryInterval {
d.interval = maxRetryInterval
}
d.timeout = time.Now().Add(d.interval)
}
func (s *Server) checkDelayRecord(rs *meta.RangeStatsInfo) bool {
d, ok := s.delayRecord[rs.RangeID]
if !ok {
return false
}
if d.valid() {
return true
}
if err := s.checkRangeSplit(rs); err != nil {
d.next()
log.Infof("checkRangeSplit error:%s", err.Error())
return true
}
delete(s.delayRecord, rs.RangeID)
return true
}
var (
store client.Storage
)
func (s *Server) checkReplica() {
var err error
if s.cluster.count() < minReplica {
log.Infof("current nodes count:%d, minReplica:%d", s.cluster.count(), minReplica)
return
}
if store == nil {
store, err = client.Open(strings.Join(s.cfg.EtcdHosts, ";"))
if err != nil {
log.Errorf(" client.Open(%s) error:%s", strings.Join(s.cfg.EtcdHosts, ";"), err.Error())
return
}
}
for _, rd := range s.region.unstable() {
log.Debugf("unstable range:(%+v)", rd)
var ids []meta.NodeID
var reps []meta.ReplicaDescriptor
for _, r := range rd.Replicas {
ids = append(ids, r.NodeID)
}
for len(ids) < minReplica {
n, err := s.cluster.getIdleNode(ids...)
if err != nil {
log.Errorf("getIdleNode error:%s", err.Error())
return
}
ids = append(ids, n.NodeID)
rID, err := s.newID()
if err != nil {
log.Errorf("newID error:%s", err.Error())
return
}
reps = append(reps, meta.ReplicaDescriptor{NodeID: n.NodeID, ReplicaID: meta.ReplicaID(rID)})
}
rd.Replicas = append(rd.Replicas, reps...)
for _, rep := range reps {
if err := store.GetAdmin().CreateReplica(rep.NodeID, *rd); err != nil {
log.Errorf("CreateReplica node:(%d), rd:%+v, error:%s", rep.NodeID, rd, err.Error())
continue
}
}
if err := s.region.setRangeDescriptors(*rd); err != nil {
log.Errorf("setRangeDescriptors error:%s, rd:%+v", err.Error(), rd)
}
log.Debugf("add replica success, range:(%+v)", rd)
}
}
func (s *Server) checkSplit() {
for _, ns := range s.cluster.getNodeStats() {
for _, rs := range ns.RangeStatsInfo {
if s.checkDelayRecord(rs) {
continue
}
if err := s.checkRangeSplit(rs); err != nil {
log.Errorf("checkRangeSplit error:%s", err.Error())
s.delayRecord[rs.RangeID] = newDelayRecord()
}
}
}
}
func (s *Server) checkLoop() {
s.stopper.RunWorker(func() {
ticker := time.NewTicker(time.Duration(s.cfg.CheckInterval) * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if s.cfg.RangeSplitType != splitTypeManual {
s.checkSplit()
}
s.checkReplica()
case <-s.stopper.ShouldStop():
return
}
}
})
}
func (s *Server) checkRangeSplit(r *meta.RangeStatsInfo) error {
if s.dm.checkSplit(r, s.cfg.RangeCapacity, s.cfg.RangeSplitThreshold) {
k, b, c, err := s.getSplitKey(r)
if err != nil {
log.Errorf(errors.ErrorStack(err))
return errors.Trace(err)
}
if err := s.requestSplit(r, k, b, c); err != nil {
log.Errorf(errors.ErrorStack(err))
return errors.Trace(err)
}
}
return nil
}
func (s *Server) getSplitKey(rsi *meta.RangeStatsInfo) (key meta.Key, bytes int64, count int64, err error) {
var rd *meta.RangeDescriptor
if rd, err = s.region.getRangeDescriptor(rsi.RangeID); err != nil {
return
}
req := meta.GetSplitKeyRequest{
RequestHeader: meta.RequestHeader{
Key: rd.StartKey,
RangeID: rsi.RangeID,
Flag: meta.IsRead,
},
SplitSize: rsi.TotalBytes / 2,
}
breq := meta.BatchRequest{RequestHeader: req.RequestHeader}
breq.Add(&req)
bresp := meta.BatchResponse{}
log.Infof("get split key request to node %d, range: %d, split size: %d", rsi.NodeID, req.RangeID, req.SplitSize)
if err = s.sendBatchRequestToLeader(&breq, &bresp, rd); err != nil {
return
}
key = bresp.Resp[0].GetSplitKey.SplitKey
bytes = bresp.Resp[0].GetSplitKey.RangeBytes
count = bresp.Resp[0].GetSplitKey.RangeCount
log.Infof("get split key success, split key: %v, range bytes: %d, range count: %d", key, bytes, count)
return
}
func (s *Server) requestSplit(rsi *meta.RangeStatsInfo, key meta.Key, bytes int64, count int64) error {
id, err := s.idAllocator.newID()
if err != nil {
return errors.Errorf("get new rangeID failed")
}
rd, err := s.region.getRangeDescriptor(rsi.RangeID)
if err != nil {
return errors.Trace(err)
}
req := meta.SplitRequest{
RequestHeader: meta.RequestHeader{
Key: key,
RangeID: rsi.RangeID,
Flag: meta.IsWrite,
},
SplitKey: key,
NewRangeID: meta.RangeID(id),
NewRangeBytes: bytes,
NewRangeCount: count,
}
breq := meta.BatchRequest{RequestHeader: req.RequestHeader}
breq.Add(&req)
bresp := meta.BatchResponse{}
log.Infof("split request to node %d, range %d, split key: %s, new range ID: %d, bytes: %d, count: %d",
rsi.NodeID, req.RangeID, key, id, bytes, count)
if err := s.sendBatchRequestToLeader(&breq, &bresp, rd); err != nil {
return errors.Trace(err)
}
log.Info("split request execute end")
newPostRD := bresp.Resp[0].Split.RangeDescriptors[1]
if err := s.updateRangeDescriptor(&newPostRD, rd); err != nil {
log.Errorf("update range descriptor failed, %s\n", err.Error())
return errors.Trace(err)
}
log.Info("updateRangeDescriptor end")
newPrevRD := bresp.Resp[0].Split.RangeDescriptors[0]
if err := s.region.setRangeDescriptors(newPostRD, newPrevRD); err != nil {
log.Errorf("%s\n", err.Error())
return errors.Trace(err)
}
log.Info("update prev range descriptor on pd success")
log.Info("split request success")
return nil
}
| [
0,
6
] |
package Redis
import (
"github.com/go-redis/redis/v8"
"context"
"log"
)
var ctx = context.Background()
type Redis struct{
//Main structure for redis client.
//It has connection field to save
//redis-client connection.
connection *redis.Client
}
func (r *Redis)Connect(){
//Connects to redis server.
conn := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "",
DB: 0,
})
log.Println("Connected to Redis!")
r.connection = conn
}
func (r Redis)StartDataProcessing(messageChan chan string){
//Subscribes to important channel and starts to listen
//to it. Pushes gotten string messages to `messageChan`.
sub := r.connection.Subscribe(ctx, "test")
for{
msg, err := sub.ReceiveMessage(ctx)
if err != nil{
panic(err)
}
messageChan <- msg.Payload
}
}
func (r Redis)CheckIdDublesExist(StringToCheck string)bool{
result := r.connection.LRange(ctx, "authed_users", 0, -1)
list, err := result.Result()
if err != nil{
log.Fatalln(err)
}
for _, value := range(list){
if value == StringToCheck{
return true
}
}
return false
}
func (r Redis)CreateAuthRecord(randomString string){
r.connection.RPush(ctx, "authed_users", randomString)
}
| [
2,
3
] |
package guess_number_higher_or_lower
// https://leetcode.com/problems/guess-number-higher-or-lower
// level: 1
// time: O(log(n)) 0ms 100%
// space: O(1) 1.9M 100%
var pick int
func guess(num int) int {
if num == pick {
return 0
} else if num > pick {
return -1
} else {
return 1
}
}
// leetcode submit region begin(Prohibit modification and deletion)
/**
* Forward declaration of guess API.
* @param num your guess
* @return -1 if num is lower than the guess number
* 1 if num is higher than the guess number
* otherwise return 0
* func guess(num int) int;
*/
func guessNumber(n int) int {
l, r := 1, n
for l <= r {
mid := l + (r-l)>>1
diff := guess(mid)
if diff == 0 {
return mid
} else if diff > 0 {
l = mid + 1
} else {
r = mid - 1
}
}
return 0
}
// leetcode submit region end(Prohibit modification and deletion)
| [
5
] |
package main
import "fmt"
// Celsius ...
type Celsius float64
// ToF convert Celsius to Fahrenheit
func (c Celsius) ToF() Fahrenheit {
return CToF(c)
}
// Fahrenheit ...
type Fahrenheit float64
// ToC convert Celsius to Fahrenheit
func (f Fahrenheit) ToC() Celsius {
return FToC(f)
}
// const variable
const (
AbsoluteZeroC Celsius = -273.15
FreezingC Celsius = 0
BoilingC Celsius = 100
)
// CToF convert Celsius to Fahrenheit
func CToF(c Celsius) Fahrenheit { return Fahrenheit(c*9/5 + 32) }
// FToC convert Fahrenheit to Celsius
func FToC(f Fahrenheit) Celsius { return Celsius((f - 32) * 5 / 9) }
func main() {
fmt.Printf("%g\n", BoilingC-FreezingC) // "100" °C
boilingF := BoilingC.ToF()
fmt.Printf("%g\n", boilingF-FreezingC.ToF()) // "180" °F
//fmt.Printf("%g\n", boilingF-FreezingC) // compile error: type mismatch
var c Celsius
var f Fahrenheit
fmt.Println(c == 0) // "true"
fmt.Println(f >= 0) // "true"
fmt.Println(c == Celsius(f)) // "true"!
//fmt.Println(c == f) // compile error: type mismatch
}
| [
3
] |
package main
import (
"fmt"
"github.com/veandco/go-sdl2/sdl"
)
type field struct {
squares squares
selected int
}
type square struct {
R sdl.Rect
}
// ID of the square
type ID int32
type squares []square
func createSquares(startX, startY, width, height, spacing int32) (sq squares) {
var x, y int32
for i := 0; i < 3; i++ {
for j := 0; j < 3; j++ {
x = (int32(j) * (spacing + width)) + startX
y = (int32(i) * (spacing + height)) + startY
sq = append(sq, square{R: sdl.Rect{X: x, Y: y, W: width, H: height}})
}
}
return
}
func (f *field) render(r *sdl.Renderer, mp sdl.Point) {
for i, s := range f.squares {
if f.selected >= 0 && f.selected < 9 && f.selected == i {
r.SetDrawColor(100, 255, 255, 255)
} else if mp.InRect(&s.R) {
r.SetDrawColor(255, 0, 255, 255)
} else {
r.SetDrawColor(100, 0, 255, 255)
}
r.FillRect(&s.R)
}
}
func (f *field) setSelected(mp sdl.Point, p player) {
for i, s := range f.squares {
if mp.InRect(&s.R) {
f.selected = i
for peer, rw := range peers {
if peers[peer] != nil {
rw.WriteString(fmt.Sprintf("Selected: %d To: %s From: %s\n", i, peer, p.name))
rw.Flush()
} else {
fmt.Println("readWriter is null, cant write")
}
}
fmt.Println("Selected Square:", i)
}
}
}
| [
5
] |
// Take a number: 56789. Rotate left, you get 67895.
//
// Keep the first digit in place and rotate left the other digits: 68957.
//
// Keep the first two digits in place and rotate the other ones: 68579.
//
// Keep the first three digits and rotate left the rest: 68597. Now it is over since keeping the first four it remains only one digit which rotated is itself.
//
// You have the following sequence of numbers
//
// 56789 -> 67895 -> 68957 -> 68579 -> 68597
//
// and you must return the greatest: 68957.
//
// Calling this function max_rot (or maxRot or ... depending on the language
//
// max_rot(56789) should return 68957
package main
import (
"strconv"
"fmt"
)
func main() {
var n int64 = 56789
Lol := MaxRot(n)
fmt.Println(Lol)
}
func MaxRot(n int64) int64 {
b := strconv.Itoa(int(n))
x := b[1:] + string(b[0])
y := b[:1] + string(b[2:]) + string(b[1])
z := b[:2] + b[3:] + string(b[2])
a := b[:3] + b[4:] + string(b[3])
my, _ := strconv.Atoi(b)
dick, _ := strconv.Atoi(x)
so, _ := strconv.Atoi(y)
fc, _ := strconv.Atoi(z)
big, _ := strconv.Atoi(a)
if int(my) > int(dick) && int(my) > int(so) && int(my) > int(fc) && int(my) > int(big){
return int64(my)
}else if int(dick) > int(my) && int(dick) > int(so) && int(dick) > int(fc) && int(dick) > int(big){
return int64(dick)
}else if int(so) > int(dick) && int(so) > int(my) && int(so) > int(fc) && int(so) > int(big){
return int64(so)
}else if int(fc) > int(dick) && int(fc) > int(so) && int(fc) > int(my) && int(fc) > int(big){
return int64(fc)
}else if int(big) > int(dick) && int(big) > int(so) && int(big) > int(fc) && int(big) > int(my){
return int64(big)
}
return 0
}
| [
5
] |
package http_proxy_middleware
import (
"fmt"
"github.com/didi/gatekeeper/model"
"github.com/didi/gatekeeper/public"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
)
//匹配接入方式 基于请求信息
func HTTPWhiteListMiddleware() gin.HandlerFunc {
return func(c *gin.Context) {
serviceDetail, err := model.GetServiceDetailFromGinContext(c)
if err != nil {
public.ResponseError(c, 2001, err)
c.Abort()
return
}
whiteListString := serviceDetail.PluginConf.GetPath("http_whiteblacklist", "ip_white_list").MustString()
if whiteListString != "" {
if !public.InIPSliceStr(c.ClientIP(), whiteListString) {
public.ResponseError(c, 3001, errors.New(fmt.Sprintf("%s not in white ip list", c.ClientIP())))
c.Abort()
return
}
}
c.Next()
}
}
| [
3
] |
package streamtcp
import (
"bufio"
// "errors"
"fmt"
"log"
"net"
"time"
)
type CallBackClient func(*Session, string)
type Session struct {
conn net.Conn
incoming Message
outgoing Message
reader *bufio.Reader
writer *bufio.Writer
quiting chan net.Conn
name string
closing bool
messageRec CallBackClient
}
func (self *Session) GetName() string {
return self.name
}
func (self *Session) SetName(name string) {
self.name = name
}
func (self *Session) GetIncoming() []byte {
return <-self.incoming
}
func (self *Session) PutOutgoing(message []byte) {
self.outgoing <- message
}
func CreateSession(conn net.Conn, callback CallBackClient) *Session {
reader := bufio.NewReader(conn)
writer := bufio.NewWriter(conn)
session := &Session{
conn: conn,
incoming: make(Message, 1024),
outgoing: make(Message, 1024),
quiting: make(chan net.Conn),
reader: reader,
writer: writer,
messageRec: callback,
}
session.closing = false
session.Listen()
return session
}
func (self *Session) Listen() {
go self.Read()
go self.Write()
}
func (self *Session) quit() {
self.quiting <- self.conn
}
/*
func (self *Session) WritePing() error {
if _, err := self.writer.Write([]byte("P")); err != nil {
return errors.New("write ping error")
}
if err := self.writer.Flush(); err != nil {
log.Printf("Write error: %s\n", err)
return errors.New("write ping error")
}
return nil
}
*/
func (self *Session) Read() {
if self.closing {
return
}
tmpBuffer := make([]byte, 0)
buffer := make([]byte, 1024)
for {
if self.closing {
return
}
n, err := self.reader.Read(buffer)
//self.reader.Read()
if err != nil {
/*
if err == io.EOF {
//fmt.Println("n is =====================", n)
break
}
*/
self.closing = true
log.Println(" connection error: ", err) //self.conn.RemoteAddr().String(),
self.quit()
return
}
if n == 1 && string(buffer[:1]) == "P" {
/*
if _, err := self.writer.Write([]byte("P")); err != nil {
self.quit()
return
}
if err := self.writer.Flush(); err != nil {
log.Printf("Write error: %s\n", err)
self.quit()
return
}
*/
//log.Println(self.conn.RemoteAddr().String(), " recv : P ")
}
if n > 0 {
//fmt.Println("n is ========================================", n)
tmpBuffer = Unpack(append(tmpBuffer, buffer[:n]...), self.incoming)
}
/*
if line, _, err := self.reader.ReadLine(); err == nil {
self.incoming <- string(line)
} else {
log.Printf("Read error: %s\n", err)
self.quit()
return
}
*/
}
}
func (self *Session) WritePing() {
self.outgoing <- []byte("P")
}
func (self *Session) Write() {
for {
if self.closing {
return
}
/*
timeout := make(chan bool)
defer func() {
//close(timeout)
//<-timeout
}()
go func() {
if self.closing {
close(timeout)
return
}
time.Sleep(30 * time.Second)
//fmt.Println("sleep 30")
if self.closing {
close(timeout)
return
}
timeout <- true
//fmt.Println("end sleep 30")
}()
*/
select {
case <-time.After(time.Second * 30):
if self.closing {
return
}
//close(timeout)
//fmt.Println("my time out")
//fmt.Println("recv sleep 30")
go self.WritePing()
//fmt.Println("send outgoing P")
/*
if err := self.WritePing(); err != nil {
self.quit()
return
}
*/
//log.Println(self.conn.RemoteAddr().String(), " send : P ")
case data := <-self.outgoing:
if self.closing {
return
}
var out []byte
if len(data) == 1 && string(data[:1]) == "P" {
out = data
fmt.Println("my time out")
} else {
out = Packet([]byte(data))
}
//fmt.Println(self.conn, " send:", string(out))
if _, err := self.writer.Write(out); err != nil {
log.Printf("Write error: %s\n", err)
self.closing = true
self.quit()
return
}
if err := self.writer.Flush(); err != nil {
log.Printf("Write error: %s\n", err)
self.closing = true
self.quit()
return
}
}
//case <-timeout:
}
}
func (self *Session) Close() {
self.conn.Close()
}
| [
3
] |
package db_node
import (
"github.com/solympe/Golang_Training/pkg/pattern-proxy/db-functions"
)
type dbNode struct {
cache db_functions.DBFunctions
dataBase db_functions.DBFunctions
}
// SendData updates data in the main data-base and cache
func (n *dbNode) SendData(data string) {
db_functions.DBFunctions.SendData(n.cache, data)
db_functions.DBFunctions.SendData(n.dataBase, data)
}
// GetData returns 'fresh' data from cache
func (n dbNode) GetData() string {
freshData := db_functions.DBFunctions.GetData(n.cache)
return freshData
}
// NewDBNode returns new instance of dataBaseNode(proxy)
func NewDBNode(cache db_functions.DBFunctions, db db_functions.DBFunctions) db_functions.DBFunctions {
return &dbNode{cache, db}
}
| [
6
] |
package companyreg
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// BookkeepingCommodity is a nested struct in companyreg response
type BookkeepingCommodity struct {
TopOrgCode string `json:"TopOrgCode" xml:"TopOrgCode"`
CommodityCode string `json:"CommodityCode" xml:"CommodityCode"`
CommodityName string `json:"CommodityName" xml:"CommodityName"`
SpecCode string `json:"SpecCode" xml:"SpecCode"`
PackageVersionCode string `json:"PackageVersionCode" xml:"PackageVersionCode"`
PackageVersionValue string `json:"PackageVersionValue" xml:"PackageVersionValue"`
PackageVersionName string `json:"PackageVersionName" xml:"PackageVersionName"`
CityModuleCode string `json:"CityModuleCode" xml:"CityModuleCode"`
CityModuleValue string `json:"CityModuleValue" xml:"CityModuleValue"`
CityModuleName string `json:"CityModuleName" xml:"CityModuleName"`
OrdTimeCode string `json:"OrdTimeCode" xml:"OrdTimeCode"`
OrdTimeValue string `json:"OrdTimeValue" xml:"OrdTimeValue"`
OrdTimeName string `json:"OrdTimeName" xml:"OrdTimeName"`
TopOrgName string `json:"TopOrgName" xml:"TopOrgName"`
ServiceModuleCode string `json:"ServiceModuleCode" xml:"ServiceModuleCode"`
ServiceModuleValue string `json:"ServiceModuleValue" xml:"ServiceModuleValue"`
ServiceModuleName string `json:"ServiceModuleName" xml:"ServiceModuleName"`
AreaType string `json:"AreaType" xml:"AreaType"`
}
| [
3
] |
package main
import yaml "gopkg.in/yaml.v2"
// Convertable parses input as array of bytes into "from" version, then calls
// convert function which needs to be implemented in specific version conversion
// then it marshals yaml into "out" version and returns the array of bytes of
// that yml file
// Args:
// in interface{} - composev1, composev23 etc type
// out interface{} - composev1, composev23 etc type
// bytes - input docker-compose.yml as array of bytes
// f - func which populates out interface{}
// Returns []byte, error
// Example usage:
// return Convertable(&in, &out, bytes, func() {
// out.Version = "2.3"
// })
func Convertable(in interface{}, out interface{}, bytes *[]byte, f func()) ([]byte, error) {
err := yaml.Unmarshal(*bytes, in)
if err != nil {
return nil, err
}
f()
bytesout, err := yaml.Marshal(&out)
if err != nil {
return nil, err
}
return bytesout, nil
}
type converter map[string]func(bytes *[]byte) ([]byte, error)
func getConverters() map[string]converter {
var converters = make(map[string]converter)
converters["v1"] = converter{
"v2.3": v1tov23,
"v3.2": v1tov32,
}
return converters
}
| [
6
] |
package main
type MyStack struct {
val []int
}
/** Initialize your data structure here. */
func Constructor() MyStack {
return MyStack{[]int{}}
}
/** Push element x onto stack. */
func (this *MyStack) Push(x int) {
this.val = append([]int{x},this.val...)
}
/** Removes the element on top of the stack and returns that element. */
func (this *MyStack) Pop() int {
temp := this.val[0]
this.val = this.val[1:]
return temp
}
/** Get the top element. */
func (this *MyStack) Top() int {
return this.val[0]
}
/** Returns whether the stack is empty. */
func (this *MyStack) Empty() bool {
if len(this.val) > 0 {
return false
}
return true
}
/**
* Your MyStack object will be instantiated and called as such:
* obj := Constructor();
* obj.Push(x);
* param_2 := obj.Pop();
* param_3 := obj.Top();
* param_4 := obj.Empty();
*/
//MyStack stack = new MyStack();
//
//stack.push(1);
//stack.push(2);
//stack.top(); // returns 2
//stack.pop(); // returns 2
//stack.empty(); // returns false | [
3
] |
package lessons
func uniquePathsWithObstacles(obstacleGrid [][]int) int {
m, n := len(obstacleGrid), len(obstacleGrid[0])
f := make([][]int, m)
for i := range f {
f[i] = make([]int, n)
}
for i := 0; i < m; i++ {
for j := 0; j < n; j++ {
if obstacleGrid[i][j] == 1 {
f[i][j] = 0
} else if i == 0 && j == 0 {
f[i][j] = 1
} else if i == 0 {
f[i][j] = f[i][j-1]
} else if j == 0 {
f[i][j] = f[i-1][j]
} else {
f[i][j] = f[i-1][j] + f[i][j-1]
}
}
}
return f[m-1][n-1]
}
| [
5
] |
package cookie
import (
"errors"
"net/http"
"time"
"github.com/gorilla/securecookie"
)
type AuthCookie struct {
Data string `json:"data"`
OrgCID string `json:"org_custom_id"`
SubscriptionID int32 `json:"subscription_id"` // used to check if internal/admin (sub 9999)
}
type SecureCookie struct {
s *securecookie.SecureCookie
expires int
}
// SetExpires in seconds
func (c *SecureCookie) SetExpires(expiry int) {
c.expires = expiry
c.s.MaxAge(expiry)
}
func New(hashKey, blockKey []byte) *SecureCookie {
c := &SecureCookie{}
c.expires = 3700
c.s = securecookie.New(hashKey, blockKey)
c.s.MaxAge(c.expires)
return c
}
func (c *SecureCookie) SetAuthCookie(w http.ResponseWriter, data string, orgCID string, subscriptionID int32) error {
value := &AuthCookie{Data: data, OrgCID: orgCID, SubscriptionID: subscriptionID}
encoded, err := c.s.Encode("linkai_auth", value)
if err != nil {
return err
}
cookie := &http.Cookie{
Name: "linkai_auth",
Value: encoded,
Path: "/app/",
Expires: time.Now().Add(time.Second * time.Duration(c.expires)),
SameSite: http.SameSiteStrictMode,
Secure: true,
HttpOnly: true,
}
http.SetCookie(w, cookie)
return nil
}
func (c *SecureCookie) GetAuthCookie(cookie *http.Cookie) (*AuthCookie, bool, error) {
value := &AuthCookie{}
if err := c.s.Decode("linkai_auth", cookie.Value, &value); err != nil {
return nil, false, err
}
if value.Data == "" {
return nil, false, errors.New("invalid cookie")
}
return value, true, nil
}
| [
6
] |
package config
import (
"bytes"
"errors"
"fmt"
"html/template"
"github.com/spf13/viper"
)
//ErrKey raise when an key is unknown.
var ErrKey = errors.New("KeyError")
// Default values.
var defaults = map[string]interface{}{
"out_file": "/tmp/jocasta_{{.App}}_stdout.log",
"out_maxsize": "0",
"out_backups": "0",
"err_file": "/tmp/jocasta_{{.App}}_stderr.log",
"err_maxsize": "0",
"err_backups": "0",
}
// Config implements the config store of jocasta.
type Config struct {
v *viper.Viper
App string
}
// Params type for characteristics of a stream.
type Params struct {
File string
Maxsize int
Backups int
}
// New initialize the config store.
func New(path string, filename string, app string) (*Config, error) {
v := viper.New()
for key, value := range defaults {
v.SetDefault(key, value)
}
v.SetConfigName(filename) // The file will be named [filename].json, [filename].yaml or [filename.toml]
v.AddConfigPath(path)
v.SetEnvPrefix("jocasta")
v.AutomaticEnv()
err := v.ReadInConfig()
config := &Config{v: v, App: app}
return config, err
}
func keyName(key, subkey string) (string, error) {
switch key {
case "out", "err":
return fmt.Sprintf("%s_%s", key, subkey), nil
default:
return "", fmt.Errorf("don't know anything about %s: %w", key, ErrKey)
}
}
// File return the filename for logs for given stream.
func (c *Config) File(stream string) (string, error) {
key, err := keyName(stream, "file")
if err != nil {
return "", err
}
t, err := template.New("filename").Parse(c.v.GetString(key))
if err != nil {
return "", err
}
var tpl bytes.Buffer
if err := t.Execute(&tpl, c); err != nil {
return "", err
}
return tpl.String(), nil
}
// MaxSize return the max size of log file before rotation for given stream.
func (c *Config) MaxSize(stream string) (uint, error) {
key, err := keyName(stream, "maxsize")
if err != nil {
return 0, err
}
return c.v.GetSizeInBytes(key), nil
}
// Backups return the number of historical files for logs for given stream.
func (c *Config) Backups(stream string) (int, error) {
key, err := keyName(stream, "backups")
if err != nil {
return 0, err
}
return c.v.GetInt(key), nil
}
// GetParams return the whole logs info for given stream in Params type.
func (c *Config) GetParams(stream string) (*Params, error) {
maxsize, err := c.MaxSize(stream)
if err != nil {
return nil, err
}
// The errors are already trapped at c.MaxSize
backups, _ := c.Backups(stream)
file, err := c.File(stream)
if err != nil {
return nil, err
}
p := &Params{
Maxsize: int(maxsize),
Backups: backups,
File: file,
}
return p, nil
}
| [
3,
6
] |
package main
/******************** Testing Objective consensu:STATE TRANSFER ********
* Setup: 4 node local docker peer network with security
* 0. Deploy chaincodeexample02 with 100000, 90000 as initial args
* 1. Send Invoke Requests on multiple peers using go routines.
* 2. Verify query results match on PEER0 and PEER1 after invoke
*********************************************************************/
import (
"fmt"
//"strconv"
"time"
"obcsdk/chaincode"
"obcsdk/peernetwork"
"sync"
)
func main() {
fmt.Println("Creating a local docker network")
peernetwork.SetupLocalNetwork(4, true)
_ = chaincode.InitNetwork()
chaincode.InitChainCodes()
chaincode.RegisterUsers()
time.Sleep(30000 * time.Millisecond)
fmt.Println("\nPOST/Chaincode: Deploying chaincode at the beginning ....")
dAPIArgs0 := []string{"example02", "init"}
depArgs0 := []string{"a", "100000", "b", "90000"}
chaincode.Deploy(dAPIArgs0, depArgs0)
//var resa, resb string
var inita, initb, curra, currb int
inita = 100000
initb = 90000
curra = inita
currb = initb
time.Sleep(60000 * time.Millisecond)
fmt.Println("\nPOST/Chaincode: Querying a and b after deploy >>>>>>>>>>> ")
qAPIArgs0 := []string{"example02", "query"}
qArgsa := []string{"a"}
qArgsb := []string{"b"}
A, _ := chaincode.Query(qAPIArgs0, qArgsa)
B, _ := chaincode.Query(qAPIArgs0, qArgsb)
myStr := fmt.Sprintf("\nA = %s B= %s", A, B)
fmt.Println(myStr)
numReq := 250
InvokeLoop(numReq)
time.Sleep(120000 * time.Millisecond)
curra = curra - 20
currb = currb + 20
fmt.Println("\nPOST/Chaincode: Querying a and b after invoke >>>>>>>>>>> ")
qAPIArgs00 := []string{"example02", "query", "PEER0"}
qAPIArgs01 := []string{"example02", "query", "PEER1"}
qAPIArgs02 := []string{"example02", "query", "PEER2"}
qAPIArgs03 := []string{"example02", "query", "PEER3"}
res0A, _ := chaincode.QueryOnHost(qAPIArgs00, qArgsa)
res0B, _ := chaincode.QueryOnHost(qAPIArgs00, qArgsb)
res1A, _ := chaincode.QueryOnHost(qAPIArgs01, qArgsa)
res1B, _ := chaincode.QueryOnHost(qAPIArgs01, qArgsb)
res2A, _ := chaincode.QueryOnHost(qAPIArgs02, qArgsa)
res2B, _ := chaincode.QueryOnHost(qAPIArgs02, qArgsb)
res3A, _ := chaincode.QueryOnHost(qAPIArgs03, qArgsa)
res3B, _ := chaincode.QueryOnHost(qAPIArgs03, qArgsb)
fmt.Println("Results in a and b PEER0 : ", res0A, res0B)
fmt.Println("Results in a and b PEER1 : ", res1A, res1B)
fmt.Println("Results in a and b PEER2 : ", res2A, res2B)
fmt.Println("Results in a and b PEER3 : ", res3A, res3B)
ht0, _ := chaincode.GetChainHeight("PEER0")
ht1, _ := chaincode.GetChainHeight("PEER1")
ht2, _ := chaincode.GetChainHeight("PEER2")
ht3, _ := chaincode.GetChainHeight("PEER3")
fmt.Printf("ht0: %d, ht1: %d, ht2: %d, ht3: %d ", ht0, ht1, ht2, ht3)
}
func InvokeLoop(numReq int) {
var wg sync.WaitGroup
invArgs0 := []string{"a", "b", "1"}
iAPIArgsCurrPeer1 := []string{"example02", "invoke", "PEER1"}
wg.Add(2)
go func() {
defer wg.Done()
k := 1
for k <= numReq {
go chaincode.InvokeOnPeer(iAPIArgsCurrPeer1, invArgs0)
k++
}
fmt.Println("# of Req Invoked on PEER1 ", k)
}()
go func() {
defer wg.Done()
iAPIArgsCurrPeer3 := []string{"example02", "invoke", "PEER3"}
k := 1
for k <= numReq {
go chaincode.InvokeOnPeer(iAPIArgsCurrPeer3, invArgs0)
k++
}
fmt.Println("# of Req Invoked on PEER3", k)
}()
wg.Wait()
}
| [
0,
3
] |
package toml
import (
"time"
"strconv"
"runtime"
"strings"
"fmt"
)
type Tree struct {
Root *ListNode // top-level root of the tree.
text string
lex *lexer
token [3]token // three-token lookahead for parser.
peekCount int
}
func Parse(text string) (tree *Tree, err error) {
defer parseRecover(&err)
t := &Tree{}
t.text = text
t.lex = lex(text)
t.parse()
return t, nil
}
// recover is the handler that turns panics into returns from the top level of Parse.
func parseRecover(errp *error) {
e := recover()
if e != nil {
if _, ok := e.(runtime.Error); ok {
panic(e)
}
*errp = e.(error)
}
return
}
// next returns the next tok.
func (t *Tree) next() token {
if t.peekCount > 0 {
t.peekCount--
} else {
t.token[0] = t.lex.nextToken()
}
return t.token[t.peekCount]
}
// backup backs the input stream up one tok.
func (t *Tree) backup() {
t.peekCount++
}
// backup2 backs the input stream up two tokens.
// The zeroth token is already there.
func (t *Tree) backup2(t1 token) {
t.token[1] = t1
t.peekCount = 2
}
// backup3 backs the input stream up three tokens
// The zeroth token is already there.
func (t *Tree) backup3(t2, t1 token) { // Reverse order: we're pushing back.
t.token[1] = t1
t.token[2] = t2
t.peekCount = 3
}
// peek returns but does not consume the next tok.
func (t *Tree) peek() token {
if t.peekCount > 0 {
return t.token[t.peekCount-1]
}
t.peekCount = 1
t.token[0] = t.lex.nextToken()
return t.token[0]
}
// nextNonSpace returns the next non-space tok.
func (t *Tree) nextNonSpace() (tok token) {
for {
tok = t.next()
if tok.typ != tokenSpace {
break
}
}
//pd("next %d %s", tok.typ, tok.val)
return tok
}
// peekNonSpace returns but does not consume the next non-space tok.
func (t *Tree) peekNonSpace() (tok token) {
for {
tok = t.next()
if tok.typ != tokenSpace {
break
}
}
t.backup()
return tok
}
// Parsing.
// ErrorContext returns a textual representation of the location of the node in the input text.
func (t *Tree) ErrorContext(n Node) (location, context string) {
pos := int(n.Position())
text := t.text[:pos]
byteNum := strings.LastIndex(text, "\n")
if byteNum == -1 {
byteNum = pos // On first line.
} else {
byteNum++ // After the newline.
byteNum = pos - byteNum
}
lineNum := 1 + strings.Count(text, "\n")
// TODO
//context = n.String()
context = "TODO"
if len(context) > 20 {
context = fmt.Sprintf("%.20s...", context)
}
return fmt.Sprintf("%d:%d", lineNum, byteNum), context
}
// errorf formats the error and terminates processing.
func (t *Tree) errorf(format string, args ...interface{}) {
t.Root = nil
format = fmt.Sprintf("%d: syntax error: %s", t.lex.lineNumber(), format)
panic(fmt.Errorf(format, args...))
}
// error terminates processing.
func (t *Tree) error(err error) {
t.errorf("%s", err)
}
// expect consumes the next token and guarantees it has the required type.
func (t *Tree) expect(expected tokenType, context string) token {
tok := t.nextNonSpace()
if tok.typ != expected {
t.unexpected(tok, context)
}
return tok
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (t *Tree) expectOneOf(expected1, expected2 tokenType, context string) token {
tok := t.nextNonSpace()
if tok.typ != expected1 && tok.typ != expected2 {
t.unexpected(tok, context)
}
return tok
}
// unexpected complains about the token and terminates processing.
func (t *Tree) unexpected(tok token, context string) {
t.errorf("unexpected %s in %s", tok, context)
}
func (t *Tree) parse() Node {
t.Root = newList(t.peek().pos)
for t.peek().typ != tokenEOF {
n := t.top()
t.Root.append(n)
}
return nil
}
// key = value
// [keygroup]
func (t *Tree) top() Node {
switch tok := t.peekNonSpace(); tok.typ {
case tokenError:
t.nextNonSpace()
t.errorf("%s", tok.val)
case tokenKeyGroup:
return t.entryGroup()
case tokenKey:
return t.entry()
default:
t.errorf("unexpected %q", tok.val)
return nil
}
return nil
}
// [keygroup]
// ...
func (t *Tree) entryGroup() Node {
token := t.nextNonSpace()
keyGroup := parseKeyGroup(token)
entries := newList(t.peek().pos)
Loop:
for {
switch tok := t.peekNonSpace(); tok.typ {
case tokenKey:
entries.append(t.entry())
default:
break Loop
}
}
return newEntryGroup(token.pos, keyGroup, entries)
}
// "[foo.bar]"
func parseKeyGroup(tok token) *KeyGroupNode {
text := tok.val
name := text[1:len(text)-1]
keys := newList(tok.pos+Pos(1))
for _, v := range strings.Split(name, ".") {
keys.append(newKey(tok.pos+Pos(len(v)), v))
}
return newKeyGroup(tok.pos, keys, text)
}
// key = value
func (t *Tree) entry() Node {
tok := t.nextNonSpace()
key := newKey(tok.pos, tok.val)
//pd("entry %s", tok.val)
t.expect(tokenKeySep, "key seperator")
return newEntry(tok.pos, key, t.value())
}
// value: string, array, ...
func (t *Tree) value() Node {
switch tok := t.nextNonSpace(); tok.typ {
case tokenBool:
return newBool(tok.pos, tok.val == "true")
case tokenNumber:
v, err := newNumber(tok.pos, tok.val)
if err != nil { t.error(err) }
return v
case tokenString:
//pd("str %d %s", tok.typ, tok.val)
v, err := strconv.Unquote(tok.val)
if err != nil { t.error(err) }
return newString(tok.pos, v, tok.val)
case tokenDatetime:
v, err := time.Parse(time.RFC3339, tok.val)
if err != nil { t.error(err) }
return newDatetime(tok.pos, v)
case tokenArrayStart:
return t.array()
default:
t.errorf("unexpected %q in value", tok.val)
return nil
}
return nil
}
// [1, 2]
func (t *Tree) array() Node {
pos := t.peek().pos
array := newList(pos)
Loop:
for {
switch tok := t.peekNonSpace(); tok.typ {
case tokenArrayEnd:
t.nextNonSpace()
break Loop
default:
//pd("array %s", tok.val)
node := t.value()
if t.peekNonSpace().typ != tokenArrayEnd {
t.expect(tokenArraySep, "array")
}
array.append(node)
}
}
return newArray(pos, array)
}
| [
3
] |
package cmd
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
"github.com/snowdrop/k8s-supervisor/pkg/common/config"
"github.com/snowdrop/k8s-supervisor/pkg/common/oc"
)
var (
ports string
)
var debugCmd = &cobra.Command{
Use: "debug [flags]",
Short: "Debug your SpringBoot application",
Long: `Debug your SpringBoot application.`,
Example: ` sb debug -p 5005:5005`,
Args: cobra.RangeArgs(0, 1),
Run: func(cmd *cobra.Command, args []string) {
log.Info("Debug command called")
_, pod := SetupAndWaitForPod()
podName := pod.Name
// Append Debug Env Vars and update POD
//log.Info("[Step 5] - Add new ENV vars for remote Debugging")
//pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env,debugEnvVars()...)
//clientset.CoreV1().Pods(application.Namespace).Update(pod)
log.Info("Restart the Spring Boot application ...")
oc.ExecCommand(oc.Command{Args: []string{"rsh", podName, config.SupervisordBin, config.SupervisordCtl, "stop", config.RunCmdName}})
oc.ExecCommand(oc.Command{Args: []string{"rsh", podName, config.SupervisordBin, config.SupervisordCtl, "start", config.RunCmdName}})
// Forward local to Remote port
log.Info("Remote Debug the Spring Boot Application ...")
oc.ExecCommand(oc.Command{Args: []string{"port-forward", podName, ports}})
},
}
func init() {
debugCmd.Flags().StringVarP(&ports, "ports", "p", "5005:5005", "Local and remote ports to be used to forward traffic between the dev pod and your machine.")
//debugCmd.MarkFlagRequired("ports")
debugCmd.Annotations = map[string]string{"command": "debug"}
rootCmd.AddCommand(debugCmd)
}
func debugEnvVars() []corev1.EnvVar {
return []corev1.EnvVar{
{
Name: "JAVA_DEBUG",
Value: "true",
},
{
Name: "JAVA_DEBUG_PORT",
Value: "5005",
},
}
}
| [
3
] |
package articles
import (
"github.com/PuerkitoBio/goquery"
"github.com/yevchuk-kostiantyn/WebsiteAggregator/models"
"log"
"strings"
)
func Search(config *models.Article) {
response, err := goquery.NewDocument(config.URL)
log.Println("New Search", config)
if err != nil {
panic("Bad URL!")
}
article := ""
response.Find("p").Each(func(index int, item *goquery.Selection) {
line := item.Text()
article += line
})
if IsInteresting(article, config.Interest) {
key := config.Interest + "|" + config.URL
log.Println("Interesting!")
SaveToDB(key, article)
} else {
log.Println("Not interesting")
}
}
func IsInteresting(article string, interest string) bool {
if strings.Contains(article, interest) {
return true
} else {
return false
}
}
| [
6
] |
package detector
import (
"fmt"
"reflect"
"strings"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/token"
"github.com/wata727/tflint/config"
"github.com/wata727/tflint/evaluator"
"github.com/wata727/tflint/issue"
"github.com/wata727/tflint/logger"
)
type Detector struct {
ListMap map[string]*ast.ObjectList
Config *config.Config
AwsClient *config.AwsClient
EvalConfig *evaluator.Evaluator
Logger *logger.Logger
Error bool
}
var detectors = map[string]string{
"aws_instance_invalid_type": "DetectAwsInstanceInvalidType",
"aws_instance_previous_type": "DetectAwsInstancePreviousType",
"aws_instance_not_specified_iam_profile": "DetectAwsInstanceNotSpecifiedIamProfile",
"aws_instance_default_standard_volume": "DetectAwsInstanceDefaultStandardVolume",
"aws_db_instance_default_parameter_group": "DetectAwsDbInstanceDefaultParameterGroup",
"aws_elasticache_cluster_default_parameter_group": "DetectAwsElasticacheClusterDefaultParameterGroup",
"aws_instance_invalid_iam_profile": "DetectAwsInstanceInvalidIamProfile",
}
func NewDetector(listMap map[string]*ast.ObjectList, c *config.Config) (*Detector, error) {
evalConfig, err := evaluator.NewEvaluator(listMap, c)
if err != nil {
return nil, err
}
return &Detector{
ListMap: listMap,
Config: c,
AwsClient: c.NewAwsClient(),
EvalConfig: evalConfig,
Logger: logger.Init(c.Debug),
Error: false,
}, nil
}
func hclLiteralToken(item *ast.ObjectItem, k string) (token.Token, error) {
objItems, err := hclObjectItems(item, k)
if err != nil {
return token.Token{}, err
}
if v, ok := objItems[0].Val.(*ast.LiteralType); ok {
return v.Token, nil
}
return token.Token{}, fmt.Errorf("ERROR: `%s` value is not literal", k)
}
func hclObjectItems(item *ast.ObjectItem, k string) ([]*ast.ObjectItem, error) {
items := item.Val.(*ast.ObjectType).List.Filter(k).Items
if len(items) == 0 {
return []*ast.ObjectItem{}, fmt.Errorf("ERROR: key `%s` not found", k)
}
return items, nil
}
func IsKeyNotFound(item *ast.ObjectItem, k string) bool {
items := item.Val.(*ast.ObjectType).List.Filter(k).Items
return len(items) == 0
}
func (d *Detector) Detect() []*issue.Issue {
var issues = []*issue.Issue{}
for ruleName, detectorMethod := range detectors {
if d.Config.IgnoreRule[ruleName] {
d.Logger.Info(fmt.Sprintf("ignore rule `%s`", ruleName))
continue
}
d.Logger.Info(fmt.Sprintf("detect by `%s`", ruleName))
method := reflect.ValueOf(d).MethodByName(detectorMethod)
method.Call([]reflect.Value{reflect.ValueOf(&issues)})
for name, m := range d.EvalConfig.ModuleConfig {
if d.Config.IgnoreModule[m.Source] {
d.Logger.Info(fmt.Sprintf("ignore module `%s`", name))
continue
}
d.Logger.Info(fmt.Sprintf("detect module `%s`", name))
moduleDetector := &Detector{
ListMap: m.ListMap,
Config: d.Config,
EvalConfig: &evaluator.Evaluator{
Config: m.Config,
},
Logger: d.Logger,
}
method := reflect.ValueOf(moduleDetector).MethodByName(detectorMethod)
method.Call([]reflect.Value{reflect.ValueOf(&issues)})
}
}
return issues
}
func (d *Detector) evalToString(v string) (string, error) {
ev, err := d.EvalConfig.Eval(strings.Trim(v, "\""))
if err != nil {
return "", err
} else if reflect.TypeOf(ev).Kind() != reflect.String {
return "", fmt.Errorf("ERROR: `%s` is not string", v)
} else if ev.(string) == "[NOT EVALUABLE]" {
return "", fmt.Errorf("ERROR; `%s` is not evaluable", v)
}
return ev.(string), nil
}
| [
5
] |
package chat
//User reprenent of User model
type User struct {
Name string `json:"name"`
Username string `json:"username"`
}
//ResponseJoin represent of joined message
type ResponseJoin struct {
Success bool `json:"success"`
Message string `json:"message"`
Username string `json:"username"`
}
//Message represent of message data
type Message struct {
From string `json:"from"`
To string `json:"to"`
Message string `json:"message"`
}
| [
3
] |
package blockchain
import (
"crypto/sha1"
"fmt"
)
//Block construct
type Block struct {
id int
hash string
previousBlockHash string
Content []byte
}
//BlockChain type
type BlockChain struct {
currentID int
blocks []*Block
}
func hash(ip []byte) string {
sha1 := sha1.New()
sha1.Write(ip)
return fmt.Sprintf("%x", sha1.Sum(nil))
}
//CalculateHash Calculate the hash of the block
func (block *Block) calculateHash() {
block.hash = hash([]byte(string(block.id) +
string(block.previousBlockHash) +
string(block.Content)))
}
//NewChain Create new chain
func NewChain() *BlockChain {
return &BlockChain{
currentID: 0,
blocks: make([]*Block, 0),
}
}
//NewBlock Create new block
func (blockchain *BlockChain) NewBlock(content []byte) *Block {
return &Block{
Content: content,
}
}
//GetBlocks Get all blocks
func (blockchain *BlockChain) GetBlocks() []*Block {
return blockchain.blocks
}
//AddBlock Add the block to chain
func (blockchain *BlockChain) AddBlock(block *Block) {
var prevHash string
if blockchain.currentID == 0 {
prevHash = ""
} else {
prevHash = blockchain.blocks[blockchain.currentID-1].hash
}
blockchain.currentID = blockchain.currentID + 1
block.id = blockchain.currentID
block.previousBlockHash = prevHash
block.calculateHash()
blockchain.blocks = append(blockchain.blocks, block)
}
//VerifyChain Verify integrity of blockchain
func (blockchain *BlockChain) VerifyChain() bool {
var originalHash string
isValid := true
for _, block := range blockchain.blocks {
originalHash = block.hash
block.calculateHash()
isValid = isValid && originalHash == block.hash
}
return isValid
}
| [
0,
3
] |
End of preview. Expand
in Data Studio
go-critic-style
A multi‑label dataset of Go code snippets annotated with style violations from the go‑critic linter's "style" group.
Curated from the bigcode/the‑stack‑v2‑dedup "Go" split, filtered to examples of manageable length.
Label Set
List of style violations covered by this dataset:
ID | Label | Description |
---|---|---|
0 | assignOp |
Could use += , -= , *= , etc. |
1 | builtinShadow |
Shadows a predeclared identifier. |
2 | captLocal |
Local variable name begins with an uppercase letter. |
3 | commentFormatting |
Comment is non‑idiomatic or badly formatted. |
4 | elseif |
Nested if statement that can be replaced with else-if . |
5 | ifElseChain |
Repeated if-else statements can be replaced with switch . |
6 | paramTypeCombine |
Function parameter types that can be combined (e.g. x, y int ). |
7 | singleCaseSwitch |
Statement switch that could be better written as if . |
Splits
The dataset is partitioned into training, validation, and test subsets in a 70/10/20 ratio:
Split | # Examples | Approx. % |
---|---|---|
train | 1536 | 70% |
validation | 222 | 10% |
test | 448 | 20% |
- Downloads last month
- 148