NAV

简介

本文档是什么

这是一个非常有用的开发者备忘单, 收集各种工作中可能会用到的、但是不太容易完全记住的工具的经典用法,方便快速查询,提高工作效率。

本文档不是什么

它不是工具手册,只是作为工具手册的补充,手册中会介绍各种用法,但是可能缺少一些实战中的用法的例子,本文档只介绍经典例子,不会覆盖工具所有功能。

bash

awk

间隔1s统计各个状态的TCP链接数

# macos
while true; do
    netstat -ntf inet | awk '/^tcp/{++s[$NF]}END{for(a in s){print a, s[a]}}'
    sleep 1
done

# linux
while true; do 
    netstat -n | awk '/^tcp/{++s[$NF]}END{for(a in s){print a, s[a]}}'
    sleep 1
done

CLOSE_WAIT 45
ESTABLISHED 303
TIME_WAIT 874
SYN_SENT 1

大小写转换

echo "Web3Yoda" | awk '{print(tolower($0))}'

web3yoda


echo "Web3Yoda" | awk '{print(toupper($0))}'

WEB3YODA

参数解释

awk

参数描述
-F指定字段分隔符

netstat

参数描述
-n不对主机IP反向DNS解析主机名
-t仅显示TCP

sed

color

颜色代码变量


## color code variables
## Each variable stores the literal text '\033[..m' — bash does not expand
## \033 inside double quotes; printf's '%b' expansion turns it into the
## actual ESC byte at print time.
C_RESET="\033[0m"
C_RESET_UNDERLINE="\033[24m"
C_RESET_REVERSE="\033[27m"
C_DEFAULT="\033[39m"
C_DEFAULTB="\033[49m"
C_BOLD="\033[1m"
C_BRIGHT="\033[2m"
C_UNDERSCORE="\033[4m"
C_REVERSE="\033[7m"
C_BLACK="\033[30m"
C_RED="\033[31m"
C_GREEN="\033[32m"
C_BROWN="\033[33m"
C_BLUE="\033[34m"
C_MAGENTA="\033[35m"
C_CYAN="\033[36m"
C_WHITE="\033[37m"

# Quote the expansions (SC2086) instead of letting them become the format
# string; '%b' interprets the \033 escapes, '\n' ends the colored line.
printf '%b\n' "${C_GREEN}${C_BOLD}GreenBold${C_RESET}"

function

有用的函数或者代码段

只对单个命令设置 _proxy 代理环境变量

# May require sudo. pw = proxy wrapper.
# The heredoc delimiter MUST be quoted ('EOF'): with a bare EOF the current
# shell expands "$@" at creation time (usually to nothing), and the generated
# script silently loses its argument forwarding.
cat <<'EOF' > /tmp/pw
#!/bin/zsh
export http_proxy=http://127.0.0.1:1080 https_proxy=http://127.0.0.1:1080 all_proxy=socks5://127.0.0.1:1087 no_proxy=10.0.0.0/8,127.0.0.1,localhost
# exec replaces the wrapper process and preserves the command's exit code;
# the proxy variables die with the process, so no trailing unset is needed.
exec "$@"
EOF

# mv to PATH and add x permission.
# chmod also needs sudo: after 'sudo mv' the file is owned by root.
sudo mv /tmp/pw /usr/local/bin/pw && sudo chmod +x /usr/local/bin/pw

# 执行需要代理的命令的时候,前面加上 pw
pw git clone https://github.com/web3yoda/example.git
pw brew install
pw npm install

有时候写bash脚本需要做到跨平台,需要判断当前os是哪种


# Detect the current OS via bash's $OSTYPE for cross-platform scripts.
# Each branch needs at least one command — a branch containing only a
# comment is a bash syntax error — so ':' (the no-op builtin) is used as
# a placeholder to replace with real code.
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
        : # Linux
elif [[ "$OSTYPE" == "darwin"* ]]; then
        : # Mac OSX
elif [[ "$OSTYPE" == "cygwin" ]]; then
        : # cygwin for Windows
elif [[ "$OSTYPE" == "msys" ]]; then
        : # MinGW for Windows
elif [[ "$OSTYPE" == "freebsd"* ]]; then
        : # freebsd
else
        : # Unknown.
fi

misc

一些有用的杂七杂八的命令

生成16位仅包含大小写字母+数字的密码(兼容macos和linux)

echo `date +%s`$RANDOM | sha256sum | base64 | head -c16; echo
NzY5YjgxMDc1OTFl

openssl rand -hex 32 | base64 | head -c32
Mzg4ZjcwZTAyYThkZTBkNjdjYmYwYTNk

一条命令下载tar.gz包并解压到指定目录 还可以去掉打包父目录, dockerfile里常用

curl -s https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-amd64-1.11.6-ea9e62ca.tar.gz \
    | tar xzf - -C /usr/local/bin/ --strip-components=1

跑一个循环间隔1s检查一个服务是否启动成功


while true; do curl -sf http://localhost:8080 >/dev/null && break; sleep 1; done  # -f 让HTTP错误码(如500)返回非0, -s 静默输出

parameter substitution https://tldp.org/LDP/abs/html/parameter-substitution.html


$ a='hello:world'

$ b=${a%:*}
$ echo "$b"
hello

$ a='hello:world:of:tomorrow'

$ echo "${a%:*}"
hello:world:of

$ echo "${a%%:*}"
hello

$ echo "${a#*:}"
world:of:tomorrow

$ echo "${a##*:}"
tomorrow

kubernetes

kubectl

kubectl

熟练掌握好kubectl的各种用法,可以让你在跟k8s打交道工作中事半功倍,以免花大量时间在google搜索的结果上

工欲善其事必先利其器,首先掌握几个子命令用法

# 查看所有resource类型,包括crd
kubectl api-resources

# sort by name
kubectl api-resources --sort-by=name

# select api group
kubectl api-resources --api-group=networking.k8s.io

# 查看你想用的yaml类型的所有可用字段
kubectl explain deployments --recursive | less

# 尤其是CRD资源, 有些比较小众,google搜不到例子,或者api版本变更导致搜到的例子都是错的,可以直接查看
kubectl explain certificates --recursive | less

jsonpath 的使用, 如果get -o yaml 输出内容太多,可以用jsonpath选择单个字段输出 详细参考 https://github.com/json-path/JsonPath

# 只查看一个指定jsonpath
kubectl get configmap nginx -o jsonpath='{.data.nginx\.conf}'

# 不太了解整个数据结构 写不出jsonpath?
kubectl get pods -o json | jq -c 'paths|join(".")' | less

查看pod资源实际使用量

# cpu top 10
kubectl -ndefault top pod --sort-by=cpu --no-headers | head

# mem top 10
kubectl -ndefault top pod --sort-by=memory --no-headers | head

# --containers 显示每个容器
kubectl -ndefault top pod --containers --sort-by=cpu --no-headers | head

查看 node资源实际使用量


# cpu top 10
kubectl top node --sort-by=cpu --no-headers | head

# mem top 10
kubectl top node --sort-by=memory --no-headers | head

查看 event


kubectl get event -n default --field-selector involvedObject.name=nginx-0

ad-hoc方式修改运行时的资源


# 修改replicas字段
kubectl scale --replicas=2 deploy/nginx

# 修改单个环境变量, 可以 kubectl set env -h 查看所有用法例子
kubectl set env sts/nginx NGINX_PORT=8080

# 查看一个pod注入的所有环境变量,注意这里虽然是set子命令但是 不会做改动
kubectl set env sts/nginx --list

临时运行一个 debugger pod 做操作,比如连数据库


# mysql
kubectl run mysql-client --rm -i --tty --image mysql:5.7 -- bash

# pgclient  
kubectl run pg-client --rm -i --tty --image postgres:13-alpine -- sh

# netshoot swiss-knife
kubectl run debugger --rm -i --tty --image nicolaka/netshoot:latest -- bash

ad-hoc 方式创建一个资源

# 创建一个deployment,理论上只需要一个image即可
kubectl create deployment nginx --image=nginx

# 使用--from-literal创建一个secret
kubectl create secret generic nginx --from-literal=basic_password=Mzk5YWY1YzNjMWEy

# 使用--from-literal创建一个configmap
kubectl create configmap nginx --from-literal=basic_username=admin

用patch方式修改资源

# scale with kubctl patch
kubectl patch deploy nginx --patch '{"spec": {"replicas": 2}}'

# disable healthcheck if you want to troubleshoot healthcheck failure
# --type json, use json patch(RFC 6902), check official docs
kubectl patch deployment nginx --type json -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/livenessProbe"}]'

# increase initialDelaySeconds to 3600
kubectl patch deploy nginx --type json \
  -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/livenessProbe/initialDelaySeconds", "value": 3600}]'
kubectl patch deploy nginx --type json \
  -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/readinessProbe/initialDelaySeconds", "value": 3600}]'

# change "command" to "sleep 1000" to troubleshoot entrypoint command error
kubectl patch deployment nginx --type json -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/command", "value":["sleep", "1000"]}]'

# change image to troubleshoot software version issue
kubectl patch deployment nginx --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"nginx:1.21"}]'

# or with set image sub cmd
kubectl set image deployment/nginx nginx=nginx:1.21 

从标准输出apply一个yaml文件

kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: busybox-sleep
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    args:
    - sleep
    - infinity
EOF

滚动方式重启一个deployment

会先起新pod,ready之后再删旧pod,比较安全

kubectl -n default rollout restart deploy nginx 

通过kubectl在本地和容器之间传输文件

# cp to /tmp
kubectl cp /tmp/hello.txt debugger-0:/tmp/hello.txt
# cp to current workDir
kubectl cp /tmp/hello.txt debugger-0:hello.txt

# check
kubectl exec -it debugger-0 -- sh -c " pwd && ls -lh && ls /tmp -lh && cat hello.txt "

kubectl cp debugger-0:/tmp/hello.txt /tmp/world.txt
tar: removing leading '/' from member names # this is fine

将pod/service等端口映射到本地

kubectl port-forward mongo-75f59d57f4-4nd6q 28015:27017

kubectl port-forward pods/mongo-75f59d57f4-4nd6q 28015:27017

kubectl port-forward deployment/mongo 28015:27017

kubectl port-forward replicaset/mongo-75f59d57f4 28015:27017

kubectl port-forward service/mongo 28015:27017

helm

yaml

一些常用的yaml资源模板或者片段 复制过来改改直接用

deployment 模板,直接复制改改即可用

# deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  namespace: default
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.21
        ports:
        - containerPort: 80 

statefulset

# statefulset
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nginx-statefulset
  namespace: default
  labels:
    app: nginx
spec:
  serviceName: nginx
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.21
        ports:
        - containerPort: 80
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi     

service

# service
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: default
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  selector:
    app: nginx   

ingress

# ingress
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations: {}
  name: nginx
  namespace: default
  labels:
    app: nginx
spec:
  rules:
  - host: www.example.com
    http:
      paths:
      - backend:
          service:
            name: nginx
            port:
              number: 80
        path: /
        pathType: ImplementationSpecific

configmap


# configmap
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx
  namespace: default
data:
  key1: value1
  nginx.conf: |
    events {
      worker_connections  1024;
    }
    worker_processes auto;
    error_log /dev/stdout notice;
    pid /var/run/nginx.pid;
    http {
      server {
        listen 80 default_server;
        server_name _;
        return 301 https://$host$request_uri;
      }
    }
    stream {
      server {
        listen 443;
        resolver 172.20.0.10;
        proxy_pass 127.0.0.1:443;
      }
    }               

secret

# secret
apiVersion: v1
kind: Secret
metadata:
  name: nginx
  namespace: default
type: Opaque
data:
  username: YWRtaW4=
  password: MWYyZDFlMmU2N2Rm

snippet livenessProbe and readinessProbe with http protocol

# livenessProbe and readinessProbe with http protocol
spec:
  containers:
  - name: app
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8080
        httpHeaders:
        - name: Custom-Header
          value: Awesome
      initialDelaySeconds: 3
      periodSeconds: 3
    readinessProbe:
      httpGet:
        path: /healthz
        port: 8080
        httpHeaders:
        - name: Custom-Header
          value: Awesome
      initialDelaySeconds: 3
      periodSeconds: 3          

snippet livenessProbe and readinessProbe with TCP protocol

# livenessProbe and readinessProbe with TCP protocol
spec:
  containers:
  - name: app
    readinessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 5
      periodSeconds: 10
    livenessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 15
      periodSeconds: 20        

mount a configmap


# mount a configmap
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: default
spec:
  containers:
    - name: nginx
      image: nginx:latest
      volumeMounts:
      - name: config-volume
        mountPath: /etc/nginx
  volumes:
    - name: config-volume
      configMap:
        name: nginx        

env from configmap or secret

# env from configmap or secret
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: default
spec:
  containers:
    - name: nginx
      image: nginx:latest
      env:
      - name: SECRET_USERNAME
        valueFrom:
          configMapKeyRef:
            name: nginx
            key: username
            optional: false
      - name: SECRET_PASSWORD
        valueFrom:
          secretKeyRef:
            name: nginx
            key: password
            optional: false
      envFrom:
      - configMapRef:
          name: nginx
      - secretRef:
          name: nginx              

snippet,affinity,同一个服务的不同pod调度到不同的node上

spec:
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: app
            operator: In
            values:
            - nginxsniproxy
        topologyKey: "kubernetes.io/hostname"

clusterrole

clusterrole binding

docker

docker compose

Dockerfile

一些有用的小技巧

将entrypoint脚本集成进Dockerfile


# syntax=docker/dockerfile:1
FROM busybox:latest
COPY --chmod=755 <<EOF /app/run.sh
#!/bin/sh
while true; do
  echo -ne "The time is now $(date +%T)\\r"
  sleep 1
done
EOF

ENTRYPOINT ["/app/run.sh"]

awscli

misc

有用的环境变量 https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html

有用的环境变量


# profile
export AWS_PROFILE=your-profile

# 设置为空可以禁用翻页器pager
export AWS_PAGER=

# credentials, ak sk
export AWS_ACCESS_KEY_ID=xxx
export AWS_SECRET_ACCESS_KEY=xxx
export AWS_ROLE_SESSION_NAME=xxx

# region
export AWS_DEFAULT_REGION=us-west-2

aws acm

aws acm 常用子命令


# describe certificates
for c in $(aws acm list-certificates \
          --query 'CertificateSummaryList[].CertificateArn' --output text)
do aws acm describe-certificate --certificate-arn $c \
    --query 'Certificate.{CertificateArn:CertificateArn, DomainName:DomainName,SubjectAlternativeNames:SubjectAlternativeNames, Status:Status,NotAfter:NotAfter}' | jq
done

aws cloudfront

aws cloudfront 常用子命令

列出所有 distribution



# list distributions with id, aliases, domain name and origin
aws cloudfront list-distributions \
  | jq  '.DistributionList.Items[] | {"Id": .Id, "Aliases": .Aliases.Items, "Domain": .DomainName, "Origin": .Origins.Items[0].DomainName}'

创建一个invalidation

# create an invalidation, clear cache
aws cloudfront create-invalidation --distribution-id E3NXXXXXXXXXXX --paths "/*"

aws ec2

aws ec2 常用子命令

使用 --filters 查询ec2


# find ec2 with --filters
aws ec2 describe-instances \
    --filters Name=instance-type,Values=t2.micro,t3.micro \
              Name=availability-zone,Values=us-east-2c

# find ec2 with --filters by tag
aws ec2 describe-instances \
    --filters Name=tag:Owner,Values=my-team

# find ec2 with --filters by tag
aws ec2 describe-instances \
    --filters "Name=tag:Owner,Values=my-team"

结合 --filters 和 --query 只输出有用的内容



# get only useful information of ec2 instances with both --filters and --query
aws ec2 describe-instances \
    --filters "Name=tag:Owner,Values=my-team" \
    --query 'Reservations[*].Instances[*].{Instance:InstanceId, Name:Tags[?Key==`Name`]|[0].Value}'

# --output table makes outputs more human readable.
aws ec2 describe-instances \
    --filters Name=tag-key,Values=Name \
    --query 'Reservations[*].Instances[*].{Instance:InstanceId,AZ:Placement.AvailabilityZone,Name:Tags[?Key==`Name`]|[0].Value}' \
    --output table

-------------------------------------------------------------
|                     DescribeInstances                     |
+--------------+-----------------------+--------------------+
|      AZ      |       Instance        |        Name        |
+--------------+-----------------------+--------------------+
|  us-east-2b  |  i-057750d42936e468a  |  my-prod-server    |
|  us-east-2a  |  i-001efd250faaa6ffa  |  test-server-1     |
|  us-east-2a  |  i-027552a73f021f3bd  |  test-server-2     |
+--------------+-----------------------+--------------------+

aws ecr

aws ecr 常用子命令


# 查看当前role
export AWS_PROFILE=PROFILE_NAME
aws ecr get-login-password \
    | docker login --username AWS --password-stdin 111111111111.dkr.ecr.ap-southeast-1.amazonaws.com

aws route53

aws route53 常用子命令

zone management


# list zones
aws route53 list-hosted-zones | jq
# create zone
aws route53 create-hosted-zone --name example.com --caller-reference 2014-04-01-18:47 | jq
# get zone id by zone name
aws route53 list-hosted-zones \
  --query 'HostedZones[?Name==`example.com.`] | [0].Id' \
  --output text

资源集

# list record sets by zone name
aws route53 list-resource-record-sets \
  --hosted-zone-id $(aws route53 list-hosted-zones \
                      --query 'HostedZones[?Name==`example.com.`] | [0].Id' \
                      --output text) \
  | jq
# list CNAME record sets by zone name 
aws route53 list-resource-record-sets \
  --hosted-zone-id $(aws route53 list-hosted-zones \
                      --query 'HostedZones[?Name==`example.com.`] | [0].Id' \
                      --output text) \
  --query 'ResourceRecordSets[?Type==`CNAME`]' \
  | jq
# create record set

aws s3

aws s3 常用子命令

重命名一个子目录


# list bucket
aws s3 ls
                                                                                           
# rename folder
aws s3 mv --recursive s3://your-bucket/path/to/foo s3://your-bucket/path/to/bar

aws ssm

aws sso

aws sts

aws sts 常用子命令

查看当前role


# 设置环境变量
export AWS_PROFILE=PROFILE_NAME
# 查看当前role
aws sts get-caller-identity

将assume role的临时ak sk token 注入环境变量

# assume role and set credentials to env variables, with jq
ROLE_ARN=arn:aws:iam::000000000000:role/your-role
eval $(aws sts assume-role \
        --role-arn ${ROLE_ARN} \
        --role-session-name sess-name \
        | jq -r '.Credentials | "export AWS_ACCESS_KEY_ID=\(.AccessKeyId) AWS_SECRET_ACCESS_KEY=\(.SecretAccessKey) AWS_SESSION_TOKEN=\(.SessionToken)"'
      )
# assume role and set credentials to env variables, with sed & awk
ROLE_ARN=arn:aws:iam::000000000000:role/your-role
eval $(aws sts assume-role \
        --role-arn ${ROLE_ARN} \
        --role-session-name sess-name \
        | egrep '(SecretAccessKey|SessionToken|AccessKeyId)' \
        | awk -F'"' '{print "export AWS"toupper(gensub(/([A-Z])/, "_\\1", "g",$2))"="$4}'
      )

gcloud

auth


CLOUDSDK_CORE_ACCOUNT=email1@domain1.com gcloud ...
CLOUDSDK_CORE_ACCOUNT=email2@domain2.com gcloud ...

gcloud config configurations create my-project1-config
gcloud config configurations activate my-project1-config
gcloud auth login  # or activate-service-account
gcloud config set project project1  # and any other configuration you need to do

gcloud config configurations create my-project2-config
gcloud config configurations activate my-project2-config
gcloud auth login  # or activate-service-account
gcloud config set project project2  # and any other configuration you need to do

CLOUDSDK_ACTIVE_CONFIG_NAME=my-project1-config gcloud ...
CLOUDSDK_ACTIVE_CONFIG_NAME=my-project2-config gcloud ...

vim

database

mysql

Login

mysql -u root -p

Show Users

SELECT User, Host FROM mysql.user;

Create User

CREATE USER 'someuser'@'localhost' IDENTIFIED BY 'somepassword';

Grant All Priveleges On All Databases

GRANT ALL PRIVILEGES ON * . * TO 'someuser'@'localhost';
FLUSH PRIVILEGES;

Show Grants

SHOW GRANTS FOR 'someuser'@'localhost';

Remove Grants

REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'someuser'@'localhost';

Delete User

DROP USER 'someuser'@'localhost';

Show Databases

SHOW DATABASES;

Create Database

CREATE DATABASE acme;

Delete Database

DROP DATABASE acme;

Select Database

USE acme;

Create Table

CREATE TABLE users(
id INT AUTO_INCREMENT,
   first_name VARCHAR(100),
   last_name VARCHAR(100),
   email VARCHAR(50),
   password VARCHAR(20),
   location VARCHAR(100),
   dept VARCHAR(100),
   is_admin TINYINT(1),
   register_date DATETIME,
   PRIMARY KEY(id)
);

Delete / Drop Table

DROP TABLE tablename;

Show Tables

SHOW TABLES;

Select

SELECT * FROM users;
SELECT first_name, last_name FROM users;

Where Clause

SELECT * FROM users WHERE location='Massachusetts';
SELECT * FROM users WHERE location='Massachusetts' AND dept='sales';
SELECT * FROM users WHERE is_admin = 1;
SELECT * FROM users WHERE is_admin > 0;

Delete Row

DELETE FROM users WHERE id = 6;

Update Row

UPDATE users SET email = 'freddy@gmail.com' WHERE id = 2;

Add New Column

ALTER TABLE users ADD age VARCHAR(3);

Modify Column

ALTER TABLE users MODIFY COLUMN age INT(3);

Order By (Sort)

SELECT * FROM users ORDER BY last_name ASC;
SELECT * FROM users ORDER BY last_name DESC;

Concatenate Columns

SELECT CONCAT(first_name, ' ', last_name) AS 'Name', dept FROM users;

Select Distinct Rows

SELECT DISTINCT location FROM users;

Between (Select Range)

SELECT * FROM users WHERE age BETWEEN 20 AND 25;

Like (Searching)

SELECT * FROM users WHERE dept LIKE 'd%';
SELECT * FROM users WHERE dept LIKE 'dev%';
SELECT * FROM users WHERE dept LIKE '%t';
SELECT * FROM users WHERE dept LIKE '%e%';

Not Like

SELECT * FROM users WHERE dept NOT LIKE 'd%';

IN

SELECT * FROM users WHERE dept IN ('design', 'sales');

Create & Remove Index

CREATE INDEX LIndex On users(location);
DROP INDEX LIndex ON users;

New Table With Foreign Key (Posts)

CREATE TABLE posts(
id INT AUTO_INCREMENT,
   user_id INT,
   title VARCHAR(100),
   body TEXT,
   publish_date DATETIME DEFAULT CURRENT_TIMESTAMP,
   PRIMARY KEY(id),
   FOREIGN KEY (user_id) REFERENCES users(id)
);

INNER JOIN

SELECT
  users.first_name,
  users.last_name,
  posts.title,
  posts.publish_date
FROM users
INNER JOIN posts
ON users.id = posts.user_id
ORDER BY posts.title;

New Table With 2 Foreign Keys

CREATE TABLE comments(
	id INT AUTO_INCREMENT,
    post_id INT,
    user_id INT,
    body TEXT,
    publish_date DATETIME DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY(id),
    FOREIGN KEY(user_id) references users(id),
    FOREIGN KEY(post_id) references posts(id)
);

Add Data to Comments Table

INSERT INTO comments(post_id, user_id, body) VALUES (1, 3, 'This is comment one'),(2, 1, 'This is comment two'),(5, 3, 'This is comment three'),(2, 4, 'This is comment four'),(1, 2, 'This is comment five'),(3, 1, 'This is comment six'),(3, 2, 'This is comment six'),(5, 4, 'This is comment seven'),(2, 3, 'This is comment seven');

Left Join

SELECT
comments.body,
posts.title
FROM comments
LEFT JOIN posts ON posts.id = comments.post_id
ORDER BY posts.title;

Join Multiple Tables

SELECT
comments.body,
posts.title,
users.first_name,
users.last_name
FROM comments
INNER JOIN posts on posts.id = comments.post_id
INNER JOIN users on users.id = comments.user_id
ORDER BY posts.title;

Aggregate Functions

SELECT COUNT(id) FROM users;
SELECT MAX(age) FROM users;
SELECT MIN(age) FROM users;
SELECT SUM(age) FROM users;
SELECT UCASE(first_name), LCASE(last_name) FROM users;

Group By

SELECT age, COUNT(age) FROM users GROUP BY age;
SELECT age, COUNT(age) FROM users WHERE age > 20 GROUP BY age;
SELECT age, COUNT(age) FROM users GROUP BY age HAVING count(age) >=2;

SHOW FIELDS FROM table


SHOW FIELDS FROM table / DESCRIBE table;

postgresql

Magic words:

psql -U postgres

Some interesting flags (to see all, use -h or --help depending on your psql version):

Most \d commands support additional param of __schema__.name__ and accept wildcards like *.*

User Related:

Configuration

sudo service postgresql stop
sudo service postgresql start
sudo service postgresql restart
sudo vim /etc/postgresql/9.3/main/postgresql.conf

# Uncomment/Change inside:
log_min_messages = debug5
log_min_error_statement = debug5
log_min_duration_statement = -1

sudo service postgresql restart
  1. Now you will get tons of details of every statement, error, and even background tasks like VACUUMs
tail -f /var/log/postgresql/postgresql-9.3-main.log
  1. How to add user who executed a PG statement to log (editing postgresql.conf):
log_line_prefix = '%t %u %d %a '

Create command

There are many CREATE choices, like CREATE DATABASE __database_name__, CREATE TABLE __table_name__ … Parameters differ but can be checked at the official documentation.

Handy queries

SELECT
   t.relname AS table_name,
   i.relname AS index_name,
   a.attname AS column_name
FROM
   pg_class t,
   pg_class i,
   pg_index ix,
   pg_attribute a,
    pg_namespace n
WHERE
   t.oid = ix.indrelid
   AND i.oid = ix.indexrelid
   AND a.attrelid = t.oid
   AND a.attnum = ANY(ix.indkey)
   AND t.relnamespace = n.oid
    AND n.nspname = 'kartones'
ORDER BY
   t.relname,
   i.relname;
SELECT datname, application_name, pid, backend_start, query_start, state_change, state, query
  FROM pg_stat_activity 
  WHERE datname='__database_name__';
SELECT * FROM pg_stat_activity WHERE waiting='t'
SELECT 
  pg_stat_get_backend_pid(s.backendid) AS procpid, 
  pg_stat_get_backend_activity(s.backendid) AS current_query
FROM (SELECT pg_stat_get_backend_idset() AS backendid) AS s;

Casting:

Query analysis:

Generating random data (source):

Get sizes of tables, indexes and full DBs:

select current_database() as database,
  pg_size_pretty(total_database_size) as total_database_size,
  schema_name,
  table_name,
  pg_size_pretty(total_table_size) as total_table_size,
  pg_size_pretty(table_size) as table_size,
  pg_size_pretty(index_size) as index_size
  from ( select table_name,
          table_schema as schema_name,
          pg_database_size(current_database()) as total_database_size,
          pg_total_relation_size(table_name) as total_table_size,
          pg_relation_size(table_name) as table_size,
          pg_indexes_size(table_name) as index_size
          from information_schema.tables
          where table_schema=current_schema() and table_name like 'table_%'
          order by total_table_size
      ) as sizes;
COPY table_name [ ( column_name [, ...] ) ]
FROM { 'filename' | STDIN }
[ [ WITH ] ( option [, ...] ) ]

COPY { table_name [ ( column_name [, ...] ) ] | ( query ) }
TO { 'filename' | STDOUT }
[ [ WITH ] ( option [, ...] ) ]
SELECT table_catalog, table_schema, table_name, privilege_type
FROM   information_schema.table_privileges
WHERE  grantee = 'user_to_check' ORDER BY table_name;
SELECT
    r.rolname,
    r.rolsuper,
    r.rolinherit,
    r.rolcreaterole,
    r.rolcreatedb,
    r.rolcanlogin,
    r.rolconnlimit,
    r.rolvaliduntil,
    ARRAY(SELECT b.rolname
      FROM pg_catalog.pg_auth_members m
      JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid)
      WHERE m.member = r.oid) as memberof, 
    r.rolreplication
FROM pg_catalog.pg_roles r
ORDER BY 1;
SELECT grantee, privilege_type
FROM information_schema.role_table_grants
WHERE table_name='name-of-the-table';
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE datname = current_database() AND pid <> pg_backend_pid();

Tools

$ echo "bind "^R" em-inc-search-prev" > $HOME/.editrc
$ source $HOME/.editrc

Resources & Documentation

sqlite

leveldb

redis

openssl

cert

key

hash

s_connect

查看服务器证书


openssl s_client -showcerts -servername httpbin.org \
    -connect httpbin.org:443 </dev/null

secp256k1

生成 secp256k1 公钥和私钥

# Install keccak-256sum
brew install sha3sum

# Generate the private key in pem format
openssl ecparam -name secp256k1 -genkey -noout > private-key.pem

# Generate the public key in pem format from private key
cat private-key.pem | openssl ec -pubout > public-key.pem

# Generate the private and public keys in hex format from private key in pem format
cat private-key.pem | openssl ec -text -noout > key.txt

# Generate the public key in hex format from public key in pem format
cat public-key.pem | openssl ec -pubin -text -noout > pub-key.txt

# Extract the public key and remove the EC prefix 0x04
cat key.txt | grep pub -A 5 | tail -n +2 | tr -d '\n[:space:]:' | sed 's/^04//' > pub.hex
cat pub-key.txt | grep pub -A 5 | tail -n +2 | tr -d '\n[:space:]:' | sed 's/^04//' > pub.hex

# Extract the private key and remove the leading zero byte
cat key.txt | grep priv -A 3 | tail -n +2 | tr -d '\n[:space:]:' | sed 's/^00//' > priv.hex

# Generate the hash and take the address part
cat pub.hex | keccak-256sum -x -l | tr -d ' -' | tail -c 41 > address.txt

tcpdump

抓http header


# 抓所有去往或者来自10.10.10.1:8080的http包的header
tcpdump -i any -A -s 10240 'tcp port 8080 and host 10.10.10.1 and (((ip[2:2] - ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0)' \
    | egrep --line-buffered "^........(GET |HTTP\/|POST |HEAD )|^[A-Za-z0-9-]+: " \
    | sed -r 's/^........(GET |HTTP\/|POST |HEAD )/\n\1/g'

参数解释

参考 man tcpdump

tcpdump

参数描述
-i 指 interface, 指定网络接口, -i any 表示所有接口
-A 指 ASCII, 用ASCII打印每个包,抓取web请求非常方便
-s 指 snapshot length, -s 10240 指限制为10240 bytes,详细见man

pkg manager

apt

homebrew

brew 基本命令

查找一个包


brew search cowsay
==> Formulae
cowsay ✔

当你记不清完整包名的时候可以用正则

pxyw brew search /^postg/  
==> Formulae
postgis                   postgresql@10             postgresql@12             postgresql@14 ✔           postgresql@9.4            postgrest ✔

查看一个包信息

brew info cowsay
==> cowsay: stable 3.04 (bottled)
Configurable talking characters in ASCII art
https://github.com/tnalpgge/rank-amateur-cowsay
/opt/homebrew/Cellar/cowsay/3.04_1 (63 files, 82.8KB) *
  Poured from bottle on 2021-11-15 at 20:51:11
From: https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/cowsay.rb
License: GPL-3.0

安装包的时候禁用自动更新

HOMEBREW_NO_AUTO_UPDATE=1 brew install cowsay

有些包有多个版本,比如node,将某个版本设置为系统默认版本

# 同时安装了node@14 node@16
pxyw brew search /^node/ 
==> Formulae
node ✔                    node-sass                 node@14 ✔                 node@18
node-build                node@10                   node@16 ✔                 node_exporter ✔

# 将当前版本设置为node@16
brew unlink node
brew link node@16

安装gnu版工具替换macos版

brew install coreutils findutils gnu-tar gnu-sed gawk gnutls gnu-indent gnu-getopt grep

安装程序员必备工具

# tools
brew install gh git jq yq bash screen autossh netcat websocat telnet tree curl grpcurl wget htop pstree 
brew install nginx wrk gum tcpdump nmap macvim gomplate sha3sum

# databases
brew install redis redisinsight redis-leveldb sqlite mysql-client@5.7 mysql@5.7 postgresql@15

安装常用开发语言sdk


brew install node@16 go@1.19 python@3.10 openjdk@17

安装devops工程师必备工具

brew install kubernetes-cli kubectx kube-ps1 helm k3d awscli session-manager-plugin google-cloud-sdk

安装区块链工程师必备工具

brew install ethereum lighthouse sha3sum

snap

yum rpm

两个命令定位的关系与区别

常用yum命令

起个容器来测试yum命令

docker run --rm -it amazonlinux:2 bash

当系统里缺某个命令,你想安装但是你又不知道包名,可以通过whatprovides子命令来搜索包


yum whatprovides '*bin/netstat'

Loaded plugins: ovl, priorities
net-tools-2.0-0.22.20131004git.amzn2.0.2.aarch64 : Basic networking tools
Repo        : amzn2-core
Matched from:
Filename    : /bin/netstat

查看一个包有没有安装


rpm -qa | egrep ^rpm
rpm-libs-4.11.3-48.amzn2.0.2.aarch64
rpm-build-libs-4.11.3-48.amzn2.0.2.aarch64
rpm-4.11.3-48.amzn2.0.2.aarch64

查看一个rpm包里装了哪些文件在哪些地方, 比如查看包里装了哪些二进制命令

rpm -ql rpm | grep /bin
/bin/rpm
/usr/bin/rpm2cpio
/usr/bin/rpmdb
/usr/bin/rpmkeys
/usr/bin/rpmquery
/usr/bin/rpmverify

jq / yq

jq

yq

github

gh cli

相关链接:

git不用PAT或者SSHKEY, 使用sso认证, 更安全方便


# 根据提示,选Github.com > 选HTTPS > 选Y > 跳转到github认证
gh auth login

# 认证完成后git就可以push了
git push

# 检查认证状态
gh auth status 
github.com
  ✓ Logged in to github.com as web3yoda (/Users/user/.config/gh/hosts.yml)
  ✓ Git operations for github.com configured to use https protocol.
  ✓ Token: *******************

本地github多账号随意切换(通过环境变量GH_CONFIG_DIR)

# 检查认证状态
gh auth status 
github.com
  ✓ Logged in to github.com as web3yoda (/Users/user/.config/gh/hosts.yml)
  ...

# 更改配置保存位置的环境变量 https://cli.github.com/manual/gh_help_environment
export GH_CONFIG_DIR=${HOME}/.config/.gh2

# 检查认证状态处于未认证状态
gh auth status
You are not logged into any GitHub hosts. Run gh auth login to authenticate.

# 使用第二个github user认证
gh auth login

# 检查认证状态为已认证及配置文件路径为 ~/.config/.gh2
gh auth status
github.com
  ✓ Logged in to github.com as web3yoda (/Users/user/.config/.gh2/hosts.yml)
  ...

# gh可以通过GH_CONFIG_DIR切换用户session了,但是 git还不能,可以这样切换,让git使用第2个用户的token
gh auth setup-git

# 两条命令两个账户来回切换
# 切换第二个账户:
export GH_CONFIG_DIR=${HOME}/.config/.gh2; gh auth setup-git
# 操作完后,切换回主账户
unset GH_CONFIG_DIR; gh auth setup-git

gh 触发一个支持 workflow_dispatch 的action workflow


# trigger 一次 run
gh workflow run my-workflow.yaml -R web3yoda/example -f input1=value1 -f input2=value2

# 列出所有的 run的记录
gh run list -R web3yoda/example --workflow my-workflow.yaml

# 查看run的日志
gh run view  -R web3yoda/example --log 5000000001

# 

git

Forked from https://gist.github.com/hofmannsven/6814451

Global Settings

Reminder

Press minus + shift + s and return to chop/fold long lines!

Show folder content: ls -la

Notes

Do not put (external) dependencies in version control!

Setup

See where Git is located: which git

Get the version of Git: git --version

Create an alias (shortcut) for git status: git config --global alias.st status

Help: git help

General

Initialize Git: git init

Get everything ready to commit: git add .

Get custom file ready to commit: git add index.html

Commit changes: git commit -m "Message"

Commit changes with title and description: git commit -m "Title" -m "Description..."

Add and commit in one step: git commit -am "Message"

Remove files from Git: git rm index.html

Update all changes: git add -u

Remove file but do not track anymore: git rm --cached index.html

Move or rename files: git mv index.html dir/index_new.html

Undo modifications (restore files from latest commited version): git checkout -- index.html

Restore file from a custom commit (in current branch): git checkout 6eb715d -- index.html

Reset

Go back to commit: git revert 073791e7dd71b90daa853b2c5acc2c925f02dbc6

Soft reset (move HEAD only; neither staging nor working dir is changed): git reset --soft 073791e7dd71b90daa853b2c5acc2c925f02dbc6

Undo latest commit: git reset --soft HEAD~

Mixed reset (move HEAD and change staging to match repo; does not affect working dir): git reset --mixed 073791e7dd71b90daa853b2c5acc2c925f02dbc6

Hard reset (move HEAD and change staging dir and working dir to match repo): git reset --hard 073791e7dd71b90daa853b2c5acc2c925f02dbc6

Hard reset of a single file (@ is short for HEAD): git checkout @ -- index.html

Update & Delete

Test-Delete untracked files: git clean -n

Delete untracked files (not staging): git clean -f

Unstage (undo adds): git reset HEAD index.html

Update most recent commit (also update the commit message): git commit --amend -m "New Message"

Branch

Show branches: git branch

Create branch: git branch branchname

Change to branch: git checkout branchname

Create and change to new branch: git checkout -b branchname

Rename branch: git branch -m branchname new_branchname or: git branch --move branchname new_branchname

Show all completely merged branches with current branch: git branch --merged

Delete merged branch (only possible if not HEAD): git branch -d branchname or: git branch --delete branchname

Delete not merged branch: git branch -D branch_to_delete

Merge

True merge (fast forward): git merge branchname

Merge to master (only if fast forward): git merge --ff-only branchname

Merge to master (force a new commit): git merge --no-ff branchname

Stop merge (in case of conflicts): git merge --abort

Stop merge (in case of conflicts): git reset --merge // prior to v1.7.4

Undo local merge that hasn’t been pushed yet: git reset --hard origin/master

Merge only one specific commit: git cherry-pick 073791e7

Rebase: git checkout branchname » git rebase master or: git merge master branchname (The rebase replays all of the commits in branchname onto the tip of master.)

Cancel rebase: git rebase --abort

Squash multiple commits into one: git rebase -i HEAD~3 (source)

Squash-merge a feature branch (as one commit): git merge --squash branchname (commit afterwards)

Stash

Put in stash: git stash save "Message"

Show stash: git stash list

Show stash stats: git stash show stash@{0}

Show stash changes: git stash show -p stash@{0}

Use custom stash item and drop it: git stash pop stash@{0}

Use custom stash item and do not drop it: git stash apply stash@{0}

Use custom stash item and index: git stash apply --index

Create branch from stash: git stash branch new_branch

Delete custom stash item: git stash drop stash@{0}

Delete complete stash: git stash clear

Gitignore & Gitkeep

About: https://help.github.com/articles/ignoring-files

Useful templates: https://github.com/github/gitignore

Add or edit gitignore: nano .gitignore

Track empty dir: touch dir/.gitkeep

Log

Show commits: git log

Show oneline-summary of commits: git log --oneline

Show oneline-summary of commits with full SHA-1: git log --format=oneline

Show oneline-summary of the last three commits: git log --oneline -3

Show only custom commits: git log --author="Sven" git log --grep="Message" git log --until=2013-01-01 git log --since=2013-01-01

Show only custom data of commit: git log --format=short git log --format=full git log --format=fuller git log --format=email git log --format=raw

Show changes: git log -p

Show every commit since special commit for custom file only: git log 6eb715d.. index.html

Show changes of every commit since special commit for custom file only: git log -p 6eb715d.. index.html

Show stats and summary of commits: git log --stat --summary

Show history of commits as graph: git log --graph

Show history of commits as graph-summary: git log --oneline --graph --all --decorate

Compare

Compare modified files: git diff

Compare modified files and highlight changes only: git diff --color-words index.html

Compare modified files within the staging area: git diff --staged

Compare branches: git diff master..branchname

Compare branches like above: git diff --color-words master..branchname^

Compare commits: git diff 6eb715d git diff 6eb715d..HEAD git diff 6eb715d..537a09f

Compare commits of file: git diff 6eb715d index.html git diff 6eb715d..537a09f index.html

Compare without caring about spaces: git diff -b 6eb715d..HEAD or: git diff --ignore-space-change 6eb715d..HEAD

Compare without caring about all spaces: git diff -w 6eb715d..HEAD or: git diff --ignore-all-space 6eb715d..HEAD

Useful comparings: git diff --stat --summary 6eb715d..HEAD

Blame: git blame -L10,+1 index.html

Releases & Version Tags

Show all released versions: git tag

Show all released versions with comments: git tag -l -n1

Create release version: git tag v1.0.0

Create release version with comment: git tag -a v1.0.0 -m 'Message'

Checkout a specific release version: git checkout v1.0.0

Collaborate

Show remote: git remote

Show remote details: git remote -v

Add remote upstream from GitHub project: git remote add upstream https://github.com/user/project.git

Add remote upstream from existing empty project on server: git remote add upstream ssh://root@123.123.123.123/path/to/repository/.git

Fetch: git fetch upstream

Fetch a custom branch: git fetch upstream branchname:local_branchname

Merge fetched commits: git merge upstream/master

Remove origin: git remote rm origin

Show remote branches: git branch -r

Show all branches (remote and local): git branch -a

Create and checkout branch from a remote branch: git checkout -b local_branchname upstream/remote_branchname

Compare: git diff origin/master..master

Push (set default with -u): git push -u origin master

Push: git push origin master

Force-Push: git push origin master --force

Pull: git pull

Pull specific branch: git pull origin branchname

Fetch a pull request on GitHub by its ID and create a new branch: git fetch upstream pull/ID/head:new-pr-branch

Clone to localhost: git clone https://github.com/user/project.git or: git clone ssh://user@domain.com/~/dir/.git

Clone to localhost folder: git clone https://github.com/user/project.git ~/dir/folder

Clone specific branch to localhost: git clone -b branchname https://github.com/user/project.git

Clone with token authentication (in CI environment): git clone https://oauth2:<token>@gitlab.com/username/repo.git

Delete remote branch (push nothing): git push origin :branchname or: git push origin --delete branchname

Archive

Create a zip-archive: git archive --format zip --output filename.zip master

Export/write custom log to a file: git log --author=sven --all > log.txt

Troubleshooting

Ignore files that have already been committed to a Git repository: http://stackoverflow.com/a/1139797/1815847

Security

Hide Git on the web via .htaccess: RedirectMatch 404 /\.git (more info here: http://stackoverflow.com/a/17916515/1815847)

Large File Storage

Website: https://git-lfs.github.com/

Install: brew install git-lfs

Track *.psd files: git lfs track "*.psd" (init, add, commit and push as written above)

foundry

foundry是一个区块链开发工具集合。项目地址:https://github.com/foundry-rs/foundry 直接从release页面下载bin的tgz包,解压后安装到 /usr/local/bin 下面即可使用

curl -sSL https://github.com/foundry-rs/foundry/releases/download/nightly/foundry_nightly_darwin_arm64.tar.gz | tar xzf - -C /usr/local/bin

anvil

anvil是foundry开发套件中的工具,定位跟 hardhat node一样是一个简易的本地开发用的eth节点,功能也类似,但是用rust编写,安装使用非常方便,启动速度也非常快

启动后默认使用 助记词 test test test test test test test test test test test junk 推导出一些账号,并已经充值,可以直接使用

** 该助记词推导的测试账号 **

IdAddressPrivate Key
000xf39Fd6e51aad88F6F4ce6aB8827279cffFb922660xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80
010x70997970C51812dc3A010C7d01b50e0d17dc79C80x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d
020x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a
030x90F79bf6EB2c4f870365E785982E1f101E93b9060x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6
040x15d34AAf54267DB7D7c367839AAf71A00a2C6A650x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a
050x9965507D1a55bcC2695C58ba16FB37d819B0A4dc0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba
060x976EA74026E726554dB657fA54763abd0C3a0aa90x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e
070x14dC79964da2C08b23698B3D3cc7Ca32193d99550x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356
080x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97
090xa0Ee7A142d267C1f36714E4a8F75612F20a797200x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6
100xBcd4042DE499D14e55001CcbB24a551F3b9540960xf214f2b2cd398c806f84e317254e0f0b801d0643303237d97a22a48e01628897
110x71bE63f3384f5fb98995898A86B02Fb2426c57880x701b615bbdfb9de65240bc28bd21bbc0d996645a3dd57e7b12bc2bdf6f192c82
120xFABB0ac9d68B0B445fB7357272Ff202C5651694a0xa267530f49f8280200edf313ee7af6b827f2a8bce2897751d06a843f644967b1
130x1CBd3b2770909D4e10f157cABC84C7264073C9Ec0x47c99abed3324a2707c28affff1267e45918ec8c3f20b8aa892e8b065d2942dd
140xdF3e18d64BC6A983f673Ab319CCaE4f1a57C70970xc526ee95bf44d8fc405a158bb884d9d1238d99f0612e9f33d006bb0789009aaa
150xcd3B766CCDd6AE721141F452C550Ca635964ce710x8166f546bab6da521a8369cab06c5d2b9e46670292d85c875ee9ec20e84ffb61
160x2546BcD3c84621e976D8185a91A922aE77ECEc300xea6c44ac03bff858b476bba40716402b03e41b8e97e276d1baec7c37d42484a0
170xbDA5747bFD65F08deb54cb465eB87D40e51B197E0x689af8efa8c651a91ad287602527f3af2fe9f6501a7ac4b061667b5a93e037fd
180xdD2FD4581271e230360230F9337D5c0430Bf44C00xde9be858da4a475276426320d5e9262ecfc3ba460bfac56360bfa6c4c28b4ee0
190x8626f6940E2eb28930eFb4CeF49B2d1F2C9C11990xdf57089febbacf7ba0bc227dafbffa9fc08a93fdc68e1e42411a14efcf23656e
200x09DB0a93B389bEF724429898f539AEB7ac2Dd55f0xeaa861a9a01391ed3d587d8a5a84ca56ee277629a8b02c22093a419bf240e65d
210x02484cb50AAC86Eae85610D6f4Bf026f30f6627D0xc511b2aa70776d4ff1d376e8537903dae36896132c90b91d52c1dfbae267cd8b
220x08135Da0A343E492FA2d4282F2AE34c6c5CC1BbE0x224b7eb7449992aac96d631d9677f7bf5888245eef6d6eeda31e62d2f29a83e4
230x5E661B79FE2D3F6cE70F5AAC07d8Cd9abb2743F10x4624e0802698b9769f5bdb260a3777fbd4941ad2901f5966b854f953497eec1b
240x61097BA76cD906d2ba4FD106E757f7Eb455fc2950x375ad145df13ed97f8ca8e27bb21ebf2a3819e9e0a06509a812db377e533def7
250xDf37F81dAAD2b0327A0A50003740e1C935C709130x18743e59419b01d1d846d97ea070b5a3368a3e7f6f0242cf497e1baac6972427
260x553BC17A05702530097c3677091C5BB47a3a79310xe383b226df7c8282489889170b0f68f66af6459261f4833a781acd0804fafe7a
270x87BdCE72c06C21cd96219BD8521bDF1F42C78b5e0xf3a6b71b94f5cd909fb2dbb287da47badaa6d8bcdc45d595e2884835d8749001
280x40Fc963A729c542424cD800349a7E4Ecc48966240x4e249d317253b9641e477aba8dd5d8f1f7cf5250a5acadd1229693e262720a19
290x9DCCe783B6464611f38631e6C851bf441907c7100x233c86e887ac435d7f7dc64979d7758d69320906a0d340d2b6518b0fd20aa998
300x1BcB8e569EedAb4668e55145Cfeaf190902d3CF20x85a74ca11529e215137ccffd9c95b2c72c5fb0295c973eb21032e823329b3d2d
310x8263Fce86B1b78F95Ab4dae11907d8AF88f841e70xac8698a440d33b866b6ffe8775621ce1a4e6ebd04ab7980deb97b3d997fc64fb
320xcF2d5b3cBb4D7bF04e3F7bFa8e27081B52191f910xf076539fbce50f0513c488f32bf81524d30ca7a29f400d68378cc5b1b17bc8f2
330x86c53Eb85D0B7548fea5C4B4F82b4205C8f6Ac180x5544b8b2010dbdbef382d254802d856629156aba578f453a76af01b81a80104e
340x1aac82773CB722166D7dA0d5b0FA35B0307dD99D0x47003709a0a9a4431899d4e014c1fd01c5aad19e873172538a02370a119bae11
350x2f4f06d218E426344CFE1A83D53dAd806994D3250x9644b39377553a920edc79a275f45fa5399cbcf030972f771d0bca8097f9aad3
360x1003ff39d25F2Ab16dBCc18EcE05a9B6154f65F40xcaa7b4a2d30d1d565716199f068f69ba5df586cf32ce396744858924fdf827f0
370x9eAF5590f2c84912A08de97FA28d0529361Deb9E0xfc5a028670e1b6381ea876dd444d3faaee96cffae6db8d93ca6141130259247c
380x11e8F3eA3C6FcF12EcfF2722d75CEFC539c51a1C0x5b92c5fe82d4fabee0bc6d95b4b8a3f9680a0ed7801f631035528f32c9eb2ad5
390x7D86687F980A56b832e9378952B738b614A99dc60xb68ac4aa2137dd31fd0732436d8e59e959bb62b4db2e6107b15f594caf0f405f
400x9eF6c02FB2ECc446146E05F1fF687a788a8BF76d0xc95eaed402c8bd203ba04d81b35509f17d0719e3f71f40061a2ec2889bc4caa7
410x08A2DE6F3528319123b25935C92888B16db8913E0x55afe0ab59c1f7bbd00d5531ddb834c3c0d289a4ff8f318e498cb3f004db0b53
420xe141C82D99D85098e03E1a1cC1CdE676556fDdE00xc3f9b30f83d660231203f8395762fa4257fa7db32039f739630f87b8836552cc
430x4b23D303D9e3719D6CDf8d172Ea030F80509ea150x3db34a7bcc6424e7eadb8e290ce6b3e1423c6e3ef482dd890a812cd3c12bbede
440xC004e69C5C04A223463Ff32042dd36DabF63A25a0xae2daaa1ce8a70e510243a77187d2bc8da63f0186074e4a4e3a7bfae7fa0d639
450x5eb15C0992734B5e77c888D713b4FC67b3D679A20x5ea5c783b615eb12be1afd2bdd9d96fae56dda0efe894da77286501fd56bac64
460x7Ebb637fd68c523613bE51aad27C35C4DB199B9c0xf702e0ff916a5a76aaf953de7583d128c013e7f13ecee5d701b49917361c5e90
470x3c3E2E178C69D4baD964568415a0f0c84fd6320A0x7ec49efc632757533404c2139a55b4d60d565105ca930a58709a1c52d86cf5d3
480x35304262b9E87C00c430149f28dD154995d012070x755e273950f5ae64f02096ae99fe7d4f478a28afd39ef2422068ee7304c636c0
490xD4A1E660C916855229e1712090CcfD8a424A2E330xaf6ecabcdbbfb2aefa8248b19d811234cd95caa51b8e59b6ffd3d4bbc2a6be4c

快速启动一个ETH节点

# 无需任何参数,即可启动,rpc跟ws均监听在 8545
anvil


                             _   _
                            (_) | |
      __ _   _ __   __   __  _  | |
     / _` | | '_ \  \ \ / / | | | |
    | (_| | | | | |  \ V /  | | | |
     \__,_| |_| |_|   \_/   |_| |_|

    0.1.0 (34d279a 2022-12-08T07:55:43.707134Z)
    https://github.com/foundry-rs/foundry

Available Accounts
==================

(0) 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 (10000 ETH)
(1) 0x70997970c51812dc3a010c7d01b50e0d17dc79c8 (10000 ETH)
...omit

Private Keys
==================

(0) 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80
(1) 0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d
...omit

Wallet
==================
Mnemonic:          test test test test test test test test test test test junk
Derivation path:   m/44'/60'/0'/0/
...omit
Listening on 127.0.0.1:8545

使用foundry的cast命令交互测试一下

# block number
cast bn
0

# chain id
cast cid
31337

# stateroot of block 0
cast bl 0 -j | jq -r .stateRoot
0x0000000000000000000000000000000000000000000000000000000000000000

# test websocket
echo eth_chainId | websocat --jsonrpc -n1 ws://127.0.0.1:8545 | jq -r .result | xargs printf '%d\n'

其他参数


# 查看所有参数
anvil -h

# 指定端口
anvil -p 9545

# 从指定高度 fork goerli
anvil -f https://rpc.ankr.com/eth_goerli --fork-block-number 8310500

# 初始化2个账号(默认10个),余额改为100ETH(默认10000ETH)
anvil -a 2 --balance 100
...omit
Available Accounts
==================

(0) 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 (100 ETH)
(1) 0x70997970c51812dc3a010c7d01b50e0d17dc79c8 (100 ETH)

Private Keys
==================

(0) 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80
(1) 0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d


# 指定其他助记词
anvil  -a 2 --balance 100 -m "joy joy joy joy joy joy joy joy joy joy joy boy"

Available Accounts
==================

(0) 0x9f8440f96d8af1bd4ef5d93f33ea86bc83959faa (100 ETH)
(1) 0xd3dfae2216b5087d570c53286ed938deffc23a9a (100 ETH)

Private Keys
==================

(0) 0x8de7817c1529925143ad1ac9f6c6672ae18e7815bbfa86d0d0f77a8309d79e6f
(1) 0x41682dfd7dd11a9cc0d9440660d701bde640343f1bcafd8256af85f7f4660991

cast

cast是一个跟链rpc交互的工具,可以十分方便查询链上的各种数据,以及发送交易,call合约等

通过 cast -h 查看帮助操作, 几乎所有命令支持缩写,比如 cast block-number 可以写为 cast bn, 下面例子均使用缩写,请结合cast -h帮助理解

首先起一个 anvil 节点用来做实验

anvil

cast相关环境变量


# 指定 rpc endpoint, 等价于 --rpc-url , 默认使用 http://127.0.0.1:8545
cast cid --rpc-url https://rpc.ankr.com/eth_goerli
5

export ETH_RPC_URL=https://rpc.ankr.com/eth_goerli
cast cid
5

查询链信息

# 查询 chain-id
cast cid 
31337

cast cid --rpc-url https://rpc.ankr.com/eth_goerli
5

# 查看 rpc server的 eth client版本
cast client
anvil/v0.1.0

cast client --rpc-url https://rpc.ankr.com/eth_goerli
erigon/2.43.0/linux-amd64/go1.19.3

# 查看当前最新高度
cast bn
1

cast bn --rpc-url https://rpc.ankr.com/eth_goerli
8000001

# 查看当前的gasprice
cast g
2000000000

cast g --rpc-url https://rpc.ankr.com/eth_goerli
32145765

查看块数据

# 从这里开始将 ETH_RPC_URL设置为 https://rpc.ankr.com/eth_goerli
export ETH_RPC_URL=https://rpc.ankr.com/eth_goerli

# 查看指定块数据
cast bl 8000000

# 查看最高块数据
cast bl latest

# 查看指定块hash的块数据
cast bl 0x2ae83825ac6b2a2b2509da8617cf31072a5628e9a818f177316f4f4bcdfafd06

# 结合jq使用查看块数据的某个字段
cast bl 8000000 -j | jq -r .hash
0x2ae83825ac6b2a2b2509da8617cf31072a5628e9a818f177316f4f4bcdfafd06

cast bl 0x2ae83825ac6b2a2b2509da8617cf31072a5628e9a818f177316f4f4bcdfafd06 -j | jq .number
8000000

# 查看块交易列表
cast bl 8000000 -j | jq -r .transactions

# 查看块stateroot
cast bl 8000000 -j | jq -r .stateRoot

# 查看 gasLimit 与 gasUsed 评估块被填满程度
cast bl 8000000 -j | jq -r .gasUsed  | xargs cast 2d
cast bl 8000000 -j | jq -r .gasLimit | xargs cast 2d

# 查看extraData 看看block builder都留言了啥内容
cast bl 8000000 -j  | jq -r .extraData | xargs cast 2as
Made on the moon by Blocknative

查看交易数据

# goerli rpc 设置为默认rpc
export ETH_RPC_URL=https://rpc.ankr.com/eth_goerli

# 列出 块号8000000 的所有交易
cast bl 8000000 -j  | jq -r .transactions
[
  "0x416fa2558422a5838140557b02ebc21c42444f503df8a2ba5020805ee420df68",
  "0xa4f0d3ef0c47315158b08810a20d07f32ab13cf3b3ef33e52426e85a6355b678",
  "0x5f61721fc92b1175cf2f41a391688aef1b9d6acd4ed07a3591d329d82f95418d",
  ...omit
]

# 查看上面的第一笔交易内容
cast tx -j 0x416fa2558422a5838140557b02ebc21c42444f503df8a2ba5020805ee420df68 | jq
{
  "hash": "0x416fa2558422a5838140557b02ebc21c42444f503df8a2ba5020805ee420df68",
  "nonce": "0x39",
  "blockHash": "0x2ae83825ac6b2a2b2509da8617cf31072a5628e9a818f177316f4f4bcdfafd06",
  "blockNumber": "0x7a1200",
  "transactionIndex": "0x0",
  "from": "0x1f452ea54d4d934afadc300c61ca0a3b1bbde958",
  "to": "0x8efe26d6839108e831d3a37ca503ea4f136a8e73",
  "value": "0x0",
  "gasPrice": "0x54c32a27e4",
  "gas": "0xb640",
  "input": "0x39509351000000000000000000000000d179c5bed30cade4e62d53dd89240745fb4c0cc20000000000000000000000000000000000000000000000001bc16d674ec80000",
  "v": "0x1",
  "r": "0xe6c51797979239ec7408fdc065082ab2757c29554513f536b32aecf3953004bb",
  "s": "0x25f3e739d9b459aea524b201fc3c3947f2168b35638f047929b433a242eb30f8",
  "type": "0x2",
  "accessList": [],
  "maxPriorityFeePerGas": "0x54c32a27e4",
  "maxFeePerGas": "0x54c32a27e4",
  "chainId": "0x5"
}

# cast 4b解析一下上面输出里input字段的method selector对应的signature这是笔什么交易
cast 4b 0x39509351
increaseAllowance(address,uint256)

# 也到etherscan浏览器里面看一下
open https://goerli.etherscan.io/tx/0x416fa2558422a5838140557b02ebc21c42444f503df8a2ba5020805ee420df68

call合约查询数据(只读),用google搜索bitdao erc20找到主网bit合约地址 0x1A4b46696b2bB4794Eb3D4c26f1c55F9170fa4C5


# cast命令call合约的格式如下,  METHOD_SIGNATURE即方法签名 包含方法名、入参类型列表、返回类型列表,如sum(int, int)(int)
cast call ${CONTRACT_ADDRESS} ${METHOD_SIGNATURE} ${ARG1} ${ARG2} ...

# eth mainnet rpc 设置为默认rpc
export ETH_RPC_URL=https://rpc.ankr.com/eth

# 查看最大发行量, 用cast fw将结果单位wei转换为bit,也就是除以10^18次方
cast call 0x1A4b46696b2bB4794Eb3D4c26f1c55F9170fa4C5 "totalSupply()(uint256)" | cast fw

# 查看token的symbol
cast call 0x1A4b46696b2bB4794Eb3D4c26f1c55F9170fa4C5 "symbol()(string)"
BIT

# 查看黑洞账号(燃烧账号)的bit余额(注意跟cast b的native token余额区分开)
cast call 0x1A4b46696b2bB4794Eb3D4c26f1c55F9170fa4C5 "balanceOf(address)(uint256)" 0x000000000000000000000000000000000000dEaD | cast fw
780683232.168659763560000000

# 查看BitDAO国库账号的bit余额(注意跟cast b的native token余额区分开)
cast call 0x1A4b46696b2bB4794Eb3D4c26f1c55F9170fa4C5 "balanceOf(address)(uint256)" 0x78605Df79524164911C144801f41e9811B7DB73D | cast fw
6019959375.550189111112685594

# 以上都是erc20合约的标准方法名

发送普通转账交易

# 转账要花钱,所以用本地节点,启动anvil, 会显示
anvil --accounts 2
Available Accounts
==================

(0) 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 (10000 ETH)
(1) 0x70997970c51812dc3a010c7d01b50e0d17dc79c8 (10000 ETH)

Private Keys
==================

(0) 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80
(1) 0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d

# unset ETH_RPC_URL 确保rpc是默认 http://localhost:8545
unset ETH_RPC_URL

# cast b查看确认一下余额
cast b 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 | cast fw
10000.000000000000000000
cast b 0x70997970c51812dc3a010c7d01b50e0d17dc79c8 | cast fw
10000.000000000000000000

# 0号测试账号往1号账号转1000个ETH
cast send --from 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 --value $(bc<<<1000*10^18) 0x70997970c51812dc3a010c7d01b50e0d17dc79c8

blockHash               0x9cf5b57e15730866219f8bd63c557acbce673e0bf5c942fe5d480054d2e33110
blockNumber             1
contractAddress
cumulativeGasUsed       21000
effectiveGasPrice       3875175000
gasUsed                 21000
logs                    []
logsBloom               0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
root
status                  1
transactionHash         0xa61eaa5d124567eddd119b8ffc157c15749c228d1e9d77808745cfd19dbecb3b
transactionIndex        0
type

# 再次观察余额, 账号0花了1000加一点点手续费,账号1收到1000
cast b 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 | cast fw
8999.999916000000000000
cast b 0x70997970c51812dc3a010c7d01b50e0d17dc79c8 | cast fw
11000.000000000000000000

# 查看高度
cast bn 
1
# 查看块内容
cast bl 1
# 参考前面的命令查看交易内容
cast tx ...

发送合约调用交易

# 重新启动anvil
anvil --accounts 2
# 首先部署erc20合约 参考 https://github.com/shidaxi/web3-dev-example
git clone https://github.com/shidaxi/web3-dev-example.git
yarn install
npx hardhat run scripts/deployMySimpleToken.ts --network localhost
Deployed 0x5FbDB2315678afecb367f032d93F642f64180aa3

# 查看 totalSupply symbol owner(也就是0号测试账号), 初始供应量1000000
cast call 0x5FbDB2315678afecb367f032d93F642f64180aa3 "totalSupply()(uint256)" | cast fw
1000000.000000000000000000

cast call 0x5FbDB2315678afecb367f032d93F642f64180aa3 "symbol()(string)" 
MST

cast call 0x5FbDB2315678afecb367f032d93F642f64180aa3 "owner()(address)"
0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266

# 0号测试账号是owner有铸币权,为1号账号铸币10个MST, mint 铸币,也就是产生币的过程
cast send --from 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80  0x5FbDB2315678afecb367f032d93F642f64180aa3 "mint(address,uint256)" 0x70997970c51812dc3a010c7d01b50e0d17dc79c8 $(bc<<<10^19)

# 再看totalSupply以及0号1号测试账号的余额变化
cast call 0x5FbDB2315678afecb367f032d93F642f64180aa3 "totalSupply()(uint256)" | cast fw
1000010.000000000000000000

cast call 0x5FbDB2315678afecb367f032d93F642f64180aa3 "balanceOf(address)(uint256)" 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 | cast fw
1000000.000000000000000000

cast call 0x5FbDB2315678afecb367f032d93F642f64180aa3 "balanceOf(address)(uint256)" 0x70997970c51812dc3a010c7d01b50e0d17dc79c8 | cast fw
10.000000000000000000

# erc20 转账, 1号给0号账号转5个 然后查看余额
cast send --from 0x70997970c51812dc3a010c7d01b50e0d17dc79c8 --private-key 0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d 0x5FbDB2315678afecb367f032d93F642f64180aa3 "transfer(address, uint256)" 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 $(bc<<<5*10^18)

cast call 0x5FbDB2315678afecb367f032d93F642f64180aa3 "balanceOf(address)(uint256)" 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266 | cast fw
1000005.000000000000000000

cast call 0x5FbDB2315678afecb367f032d93F642f64180aa3 "balanceOf(address)(uint256)" 0x70997970c51812dc3a010c7d01b50e0d17dc79c8 | cast fw
5.000000000000000000

数据转换


# 最常用hex转decimal
echo 0xa | xargs cast 2d

钱包操作

# 随机生成一个测试私钥
cast w n
Successfully created new keypair.
Address: 0xa60A3fF7D6E77306bc29b344cB887E6af351AA6B
Private Key: 0x1d430a0e2e062bf342fd25816f4765f0b245999cda731fc73e0e1c4c96e21b0d

# 私钥推导地址
cast w a 0x1d430a0e2e062bf342fd25816f4765f0b245999cda731fc73e0e1c4c96e21b0d
0xa60A3fF7D6E77306bc29b344cB887E6af351AA6B

数据编码解码

# 根据方法signature计算selector
cast k 'transfer(address,uint256)'
0xa9059cbb2ab09eb219583f4a59a5d0623ade346d962bcd4e46b11da047c9049b
# 4byte解码出方法签名
cast 4b 0xa9059cbb
transfer(address,uint256)

理解nonce 以及合约地址如何生成。nonce是账号交易计数器,保存在state里面,目的是防止双重支付

# 启动一个新的anvil
anvil -a 2

# 查看账号0的nonce
cast n 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266
0

# 部署合约(用的hardhat 0号账号)
npx hardhat run scripts/deployMySimpleToken.ts --network localhost
Deployed 0x5FbDB2315678afecb367f032d93F642f64180aa3

# 查看nonce变为1
cast n 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266                 
1

# 相同合约再部署一次,合约是一个新地址
npx hardhat run scripts/deployMySimpleToken.ts --network localhost
Deployed 0xe7f1725E7734CE288F8367e1Bb143E90bb3F0512

# 查看nonce变为2
cast n 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266                 
2

# 合约地址仅和address和nonce有关,因此可以提前计算出来
cast ca 0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266
Computed Address: 0x9fE46736679d2D9a65F0992F2272dE9f3c7fa6e0

# 再部署一次看看是否为0x9f
npx hardhat run scripts/deployMySimpleToken.ts --network localhost
Deployed 0x9fE46736679d2D9a65F0992F2272dE9f3c7fa6e0

查看 eip1967 proxy的实现 和 admin

ProxyFooAddress=0x
# cast keccak eip1967.proxy.implementation - 1
_IMPLEMENTATION_SLOT=0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc
# get implementation of a proxy
cast storage ${ProxyFooAddress} ${_IMPLEMENTATION_SLOT}

# cast keccak eip1967.proxy.admin - 1
export _ADMIN_SLOT=0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103
# get admin of a proxy
cast storage ${ProxyFooAddress} ${_ADMIN_SLOT}

chisel

forge

ethereum

rpc

通过http请求call eth rpc,查询数据

通过 curl 命令call rpc

# 查看最新高度
curl https://rpc.ankr.com/eth -s -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \
    | jq
{
  "jsonrpc": "2.0",
  "id": 1,
  "result": "0x1085fa8"
}

# 查看eth和goerli的 chainId
curl https://rpc.ankr.com/eth -s -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' \
    | jq -r .result \
    | xargs printf '%d\n'
1

curl https://rpc.ankr.com/eth_goerli -s -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}'  \
    | jq -r .result \
    | xargs printf '%d\n'
5

# 查看gasPrice
curl https://rpc.ankr.com/eth -s -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","method":"eth_gasPrice","params":[],"id":1}'  \
    | jq -r .result \
    | xargs printf '%d\n' 
35690022251

# 查看finalized safe latest 三种状态高度
for i in finalized safe latest; do \
    curl -s https://rpc.ankr.com/eth -H "Content-Type: application/json" \
    -d '{"method":"eth_getBlockByNumber","params":["'$i'",false],"id":1,"jsonrpc":"2.0"}' \
    | jq -r .result.number | xargs printf '%d\n' 
done
17325938
17325970
17326018

# 查看account余额
curl https://rpc.ankr.com/eth_goerli -s -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","method":"eth_getBalance","params":["0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", "latest"],"id":1}' \
    | jq -r .result \
    | xargs printf '%d\n'
1019489700994

# 查看account nonce
curl https://rpc.ankr.com/eth_goerli -s -H "Content-Type: application/json" \
    -d '{"jsonrpc":"2.0","method":"eth_getTransactionCount","params":["0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", "latest"],"id":1}' \
    | jq -r .result \
    | xargs printf '%d\n'
6503
# 

websocket

通过websocat请求call eth rpc,查询数据

通过 curl 命令call rpc

# 查看最新高度
echo eth_blockNumber | websocat --jsonrpc -n1 wss://wss.hyperspace.node.glif.io/apigw/lotus/rpc/v1 \
    | jq
{
  "jsonrpc": "2.0",
  "id": 1,
  "result": "0x1085fa8"
}

# 查看 chainId
echo eth_chainId | websocat --jsonrpc -n1 wss://wss.hyperspace.node.glif.io/apigw/lotus/rpc/v1 \
    | jq -r .result \
    | xargs printf '%d\n'
3141
# 

devp2p

ethkey

geth

develop

make

node

node

npm

yarn

golang

go mod

mod就是module的缩写,顾名思义管理go 的module


# 新项目创建 go.mod 
go mod init

# 根据代码里引用的包,自动更新或者移除 module
go mod tidy

# 自动下载module
go mod download

# 查看依赖图

go mod graph

go build


# 新建个简单的go程序
mkdir mytool && cd mytool

cat <<EOF > main.go
package main

func main() {
  println("Hello Yoda!")
}
EOF

# 直接运行
go run main.go
Hello Yoda!

# 编译运行
go build main.go
./main
Hello Yoda!

# 指定output bin文件
go build -o mytool main.go
./mytool
Hello Yoda!

# 添加链接器参数, ld 指 linker,移除debug信息使得 binary更小
du -h mytool
1.1M	mytool

go build -o mybin -ldflags "-w -s" main.go
du -h mytool
844K	mytool

# 编译linux平台的binary
GOOS=linux go build -ldflags "-w -s" -o mytool main.go
file mytool
mytool: ELF 64-bit LSB executable, ARM aarch64, version 1 (SYSV), statically linked, Go BuildID=FURGh_kizf7q2mSRhtEy/WzwE086pXTlDzXRm4tTS/s9VALqNxdEK6f6aWA4Hc/PEfftgeMk8zdvOg_KZhR, stripped

# 编译mac amd64 / arm64版本
GOARCH=amd64 go build -ldflags "-w -s" -o mytool main.go
file mytool
mytool: Mach-O 64-bit executable x86_64

GOARCH=arm64 go build -ldflags "-w -s" -o mytool main.go
mytool: Mach-O 64-bit executable arm64

java

java

maven

gradle

python

pip

rust

rustc

cargo

rustup

cicd

solc

solc 常用命令选项


# 编译一个contract的abi, bin, opcode , 方法签名 等信息
solc src/MyToken.sol --abi --bin --opcode --hashes -o out

# 编译的时候map好lib路径
# import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol";
# contract MyToken is ERC20  {}
solc @openzeppelin=lib/openzeppelin-contracts src/MyToken.sol --abi --bin -o out
Compiler run successful. Artifact(s) can be found in directory "out".

solidity

snippet