import com.fasterxml.classmate.TypeResolver;
import com.google.common.base.Optional;
import io.swagger.annotations.ApiModelProperty;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.ibatis.javassist.*;
import org.apache.ibatis.javassist.bytecode.AnnotationsAttribute;
import org.apache.ibatis.javassist.bytecode.ConstPool;
import org.apache.ibatis.javassist.bytecode.annotation.Annotation;
import org.apache.ibatis.javassist.bytecode.annotation.StringMemberValue;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;
import springfox.documentation.schema.ModelRef;
import springfox.documentation.service.ResolvedMethodParameter;
import springfox.documentation.spi.DocumentationType;
import springfox.documentation.spi.service.ParameterBuilderPlugin;
import springfox.documentation.spi.service.contexts.ParameterContext;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
/**
* Custom springfox (swagger2) ParameterBuilderPlugin that supports whitelist/blacklist annotations on request-body parameters
* @author xudaz
* @date 2021/6/12
*/
@Component
@Order
@Slf4j
class MyParameterBuilderPlugin implements ParameterBuilderPlugin {
@Autowired
private TypeResolver typeResolver;
@Override
public void apply(ParameterContext parameterContext) {
ResolvedMethodParameter methodParameter = parameterContext.resolvedMethodParameter();
Class<?> originClass = parameterContext.resolvedMethodParameter().getParameterType().getErasedType();
// excluded properties (blacklist)
ApiIgp igpOptional = null;
@SuppressWarnings("Guava")
Optional<ApiIgp> apiIgpOptional = methodParameter.findAnnotation(ApiIgp.class);
if ( apiIgpOptional.isPresent() ) {
igpOptional = apiIgpOptional.get();
}
// required properties (whitelist)
ApiNeed needOptional = null;
@SuppressWarnings("Guava")
Optional<ApiNeed> apiNeedOptional = methodParameter.findAnnotation(ApiNeed.class);
if ( apiNeedOptional.isPresent() ) {
needOptional = apiNeedOptional.get();
}
if (null != igpOptional || null != needOptional ) {
Random random = new Random();
// name of the generated model
String name = originClass.getSimpleName() + SwaggerConfig.MY_MODEL_NAME_PRE + random.nextInt(100);
try {
// exclude (blacklist)
if ( null != igpOptional ) {
String[] properties = igpOptional.value();
parameterContext.getDocumentationContext()
.getAdditionalModels()
// register the newly generated class as an additional model in the documentation context
.add(typeResolver.resolve(createRefModelIgp(properties, originClass.getPackage().getName() + "." + name, originClass)));
}
// include (whitelist)
if ( null != needOptional ) {
String[] properties = needOptional.value();
parameterContext.getDocumentationContext()
.getAdditionalModels()
// register the newly generated class as an additional model in the documentation context
.add(typeResolver.resolve(createRefModelNeed(properties, originClass.getPackage().getName() + "." + name, originClass)));
}
} catch (Exception e) {
log.error("swagger切面异常", e);
}
// replace the parameter's ModelRef with the dynamically generated class
parameterContext.parameterBuilder()
.parameterType("body")
.modelRef(new ModelRef(name))
.name(name);
}
}
/**
* Create a custom model for swagger2 that excludes the given properties (blacklist)
* @param properties properties to exclude
* @param name model name
* @param origin the original class
* @return the dynamically generated class
*/
private Class<?> createRefModelIgp(String[] properties, String name, Class<?> origin) {
ClassPool pool = ClassPool.getDefault();
// dynamically create a new class
CtClass ctClass = pool.makeClass(name);
try {
Field[] fields = origin.getDeclaredFields();
List<Field> fieldList = Arrays.asList(fields);
List<String> ignoreProperties = Arrays.asList(properties);
// drop the fields listed in properties
List<Field> dealFields = fieldList.stream().filter(s -> !ignoreProperties.contains(s.getName())).collect(Collectors.toList());
addField2CtClass(dealFields, origin, ctClass);
return ctClass.toClass();
} catch (Exception e) {
log.error("swagger切面异常", e);
return null;
}
}
/**
* Create a custom model for swagger2 that keeps only the given properties (whitelist)
* @param properties properties to keep
* @param name model name
* @param origin the original class
* @return the dynamically generated class
*/
private Class<?> createRefModelNeed(String[] properties, String name, Class<?> origin) {
ClassPool pool = ClassPool.getDefault();
CtClass ctClass = pool.makeClass(name);
try {
Field[] fields = origin.getDeclaredFields();
List<Field> fieldList = Arrays.asList(fields);
List<String> ignoreProperties = Arrays.asList(properties);
// keep only the fields listed in properties
List<Field> dealFields = fieldList.stream().filter(s -> ignoreProperties.contains(s.getName())).collect(Collectors.toList());
addField2CtClass(dealFields, origin, ctClass);
return ctClass.toClass();
} catch (Exception e) {
log.error("swagger切面异常", e);
return null;
}
}
private void addField2CtClass(List<Field> dealFields, Class<?> origin, CtClass ctClass ) throws NoSuchFieldException, NotFoundException, CannotCompileException {
// iterate over the fields in reverse order
for (int i = dealFields.size() - 1; i >= 0; i--) {
Field field = dealFields.get(i);
CtField ctField = new CtField(ClassPool.getDefault().get(field.getType().getName()), field.getName(), ctClass);
ctField.setModifiers(Modifier.PUBLIC);
ApiModelProperty ampAnno = origin.getDeclaredField(field.getName()).getAnnotation(ApiModelProperty.class);
String attributes = java.util.Optional.ofNullable(ampAnno).map(ApiModelProperty::value).orElse("");
// copy the @ApiModelProperty description onto the generated field
if (StringUtils.isNotBlank(attributes) ){
ConstPool constPool = ctClass.getClassFile().getConstPool();
AnnotationsAttribute attr = new AnnotationsAttribute(constPool, AnnotationsAttribute.visibleTag);
Annotation ann = new Annotation(ApiModelProperty.class.getName(), constPool);
ann.addMemberValue("value", new StringMemberValue(attributes, constPool));
attr.addAnnotation(ann);
ctField.getFieldInfo().addAttribute(attr);
}
ctClass.addField(ctField);
}
}
@Override
public boolean supports(DocumentationType documentationType) {
return true;
}
}
Author: lizhe
Swagger
/sw/pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.6.3</version>
<relativePath/> <!-- lookup parent from repository -->
</parent>
<groupId>com.lizhe</groupId>
<artifactId>sw</artifactId>
<version>0.0.1-SNAPSHOT</version>
<packaging>war</packaging>
<name>sw</name>
<description>Demo project for Spring Boot</description>
<properties>
<java.version>11</java.version>
<swagger.version>2.9.2</swagger.version>
<lombok.version>1.18.12</lombok.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-websocket</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-tomcat</artifactId>
<!-- <scope>provided</scope>-->
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>${lombok.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-bean-validators</artifactId>
<version>2.9.2</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-validation</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</project>
sw/SwApplication.java
package com.lizhe.sw;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.web.servlet.config.annotation.EnableWebMvc;
@SpringBootApplication
@EnableWebMvc
public class SwApplication {
public static void main(String[] args) {
SpringApplication.run(SwApplication.class, args);
}
}
/sw/config/SwaggerConfig.java
package com.lizhe.sw.config;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import springfox.bean.validators.configuration.BeanValidatorPluginsConfiguration;
import springfox.documentation.builders.ApiInfoBuilder;
import springfox.documentation.builders.PathSelectors;
import springfox.documentation.builders.RequestHandlerSelectors;
import springfox.documentation.service.ApiInfo;
import springfox.documentation.service.Contact;
import springfox.documentation.spi.DocumentationType;
import springfox.documentation.spring.web.plugins.Docket;
import springfox.documentation.swagger2.annotations.EnableSwagger2;
/**
* Swagger2 configuration class
*/
@Configuration
@EnableSwagger2
@Import(BeanValidatorPluginsConfiguration.class)
public class SwaggerConfig {
@Bean
public Docket createRestApi() {
return new Docket(DocumentationType.SWAGGER_2)
.apiInfo(apiInfo())
.select()
.apis(RequestHandlerSelectors.basePackage("com.lizhe.sw"))
.paths(PathSelectors.any())
.build();
}
private ApiInfo apiInfo() {
return new ApiInfoBuilder()
.title("Swagger Document")
.description("lizhe description")
.termsOfServiceUrl("http://libaibai.net")
.contact(new Contact("lizhe", "http://libaibai.net", "marshal_li_b@163.com"))
.version("1.0")
.build();
}
}
/sw/controller/StudentController.java
package com.lizhe.sw.controller;
import com.lizhe.sw.bean.Student;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.web.bind.annotation.*;
@Api(tags="Student Management")
@RestController
@RequestMapping("/api/v1")
public class StudentController {
@ApiOperation("Get Student")
@PostMapping("/student")
public Student getStudent(@RequestBody Student student){
student.setName("lizhe");
student.setAge(20);
return student;
}
}
/sw/src/main/java/com/lizhe/sw/bean/Student.java
package com.lizhe.sw.bean;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import lombok.Getter;
import lombok.Setter;
import javax.validation.constraints.Max;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotNull;
@Data
@Setter
@Getter
public class Student {
@NotNull
@ApiModelProperty(value = "name", name = "name", dataType = "String",example = "Rube")
public String name;
@Max(100L)
@Min(10L)
public int age;
}
Configuring Nginx forwarding
Case 1:
location /testauth/ {
proxy_pass http://auth.auth.svc:3002/api/v1/agnt-mst/;
}
curl http://localhost/testauth/?salt=123
Case 2:
location /api/v1/agnt-mst/ {
proxy_pass http://auth.auth.svc:3002;
}
curl http://localhost/api/v1/agnt-mst/?salt=123
Case 3:
location /api/v1/agnt-mst {
proxy_pass http://auth.auth.svc:3002;
}
curl http://localhost/api/v1/agnt-mst?salt=123
All three forms reach the same upstream path: in case 1 proxy_pass carries a URI, so the matched prefix /testauth/ is replaced by it and the upstream receives /api/v1/agnt-mst/?salt=123; in cases 2 and 3 proxy_pass has no URI part, so the original request URI (/api/v1/agnt-mst/?salt=123 and /api/v1/agnt-mst?salt=123 respectively) is forwarded to the upstream unchanged.
JavaScript values and references; functions and objects (function objects)
JavaScript has only six basic types (corresponding to the values typeof could return before ES6):
Type | Meaning | Value/Reference | Notes |
undefined | undefined | value | no value at all |
number | number | value | |
boolean | boolean | value | |
string | string | value | in assignment, strings are handled internally like references |
function | function | reference | |
object | object | reference |
The result of an ordinary expression is always a value.
A function/method may return either a value or a reference.
If two references are loosely equal (==), they are necessarily strictly equal (===).
Between a value and a reference, or between two values, loose equality (==) does not imply strict equality (===).
[== may convert types while comparing; === compares strictly and returns false whenever the types do not match]
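To make these rules concrete, here is a minimal sketch in plain JavaScript (runnable in a browser console or Node REPL; all variable names are illustrative only):
var a = 1;
var b = a;          // numbers are copied by value
b = 2;
console.log(a);     // 1, a is unaffected
var o1 = { x: 1 };
var o2 = o1;        // objects are copied by reference
o2.x = 2;
console.log(o1.x);  // 2, both names point at the same object
console.log(1 == "1");   // true, == coerces the string to a number
console.log(1 === "1");  // false, the types differ
var r1 = o1, r2 = o1;
console.log(r1 == r2, r1 === r2); // true true, equal references are also strictly equal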
Now let's look at functions and objects. JavaScript is an object-based language, and the prototype chain of every object ultimately leads back to Object.
instanceof is a binary operator, as in A instanceof B, where A must be a valid JavaScript object and B must be a valid JavaScript function.
If function B is found in object A's prototype chain, the instanceof operator returns true; otherwise it returns false.
In other words, it tests whether object A's __proto__ chain points at function B's prototype (i.e. whether object A was created by function B).
(figure: the prototype chain linking Foo, Object and Function with the instances f1, f2, o1 and o2)
Every function has, by default, a property called prototype,
and every object created from a function with the new operator has a property __proto__, which holds a reference to the prototype property of the constructor that created it.
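As a small sketch of these two relationships (Foo, f1 and f2 are placeholder names echoing the figure):
function Foo() {}
var f1 = new Foo();
var f2 = new Foo();
console.log(f1.__proto__ === Foo.prototype); // true, __proto__ points at the constructor's prototype
console.log(f2 instanceof Foo);              // true, Foo.prototype is on f2's prototype chain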
Referring to the figure: f1 and f2 were created by the Foo function, while o1 and o2 were created by the Object function.
So
console.log(obj instanceof Object)//true
console.log(fn instanceof Function)//true
Now try the following code:
function fn(){}
var obj = {}
console.log(fn instanceof Object)//true
console.log(obj instanceof Function)//false
console.log(Function instanceof Object); // true
console.log(Object instanceof Function); // true
fn is a function, so its __proto__ is Function.prototype, and Function.prototype's __proto__ is Object.prototype; Object.prototype is therefore on fn's prototype chain, so fn instanceof Object is true.
obj was created by the Object constructor; its __proto__ is Object.prototype, and Function.prototype appears nowhere on that chain, so obj instanceof Function is false.
The Function object's prototype chain also reaches Object.prototype (Function.__proto__ is Function.prototype, whose __proto__ is Object.prototype), so Function instanceof Object is true.
The last two lines take a little more work.
console.log(Function);
console.log(Object);
which print:
function Function() { [native code] }
function Object() { [native code] }
You can see that both of them are actually functions: one is the Function() function, the other is the Object() function.
Recall: if function B is found in object A's prototype chain, the instanceof operator returns true; otherwise it returns false.
The __proto__ of every function (Foo(), Object() and Function() in the figure) is Function.prototype.
The figure really says it all; the crucial thing to remember is that the left operand of instanceof is an object and the right operand is a function, and the keyword simply tests
whether the left object's __proto__ chain can be linked to the prototype of the function on the right.
console.log(Function instanceof Object); // true
console.log(Object instanceof Function); // true
In the first of these two lines, Function is the object (a function object) and Object is the type;
in the second line, Object is the object (again a function object) and Function is the type.
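One way to convince yourself of both results is to walk the chains directly; this is a quick sketch you can paste into a console:
// Function instanceof Object:
console.log(Function.__proto__ === Function.prototype);         // true
console.log(Function.prototype.__proto__ === Object.prototype); // true, so Object.prototype is on Function's chain
// Object instanceof Function:
console.log(Object.__proto__ === Function.prototype);           // true, Object is itself a function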
With all of the above, it is easy to understand why both
var function1 = function(){}; // obtained by a function expression
var function2 = new Function(); // new Function() still ends up invoking a function
create a function object: the constructor of the Function type is itself function(){}.
If all of the above still doesn't feel "twisted" enough, we can actually get a little more "twisted":
the __proto__ property can be reassigned, which means relying on it is still not entirely safe (although presumably nobody would bring this kind of trouble on themselves).
function fn(){}
var obj = {}
console.log(obj instanceof Function); //false
obj.__proto__ = Function.prototype;
console.log(obj instanceof Function); // true
Given this "twisted" example, I also have to mention typeof.
typeof returns the type of a variable or expression, and it can generally return only the following results:
number, boolean, string, function, object (null, arrays and plain objects), undefined
So typeof and instanceof are two completely different things.
In JavaScript, to actually call a function you place the call operator "()" after it.
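Below is a short sketch contrasting typeof with instanceof and showing the call operator; the names fn, obj and arr are illustrative only, and the snippet runs in any browser console or Node REPL.
function fn() { return 42; }
var obj = {};
var arr = [];
console.log(typeof fn);            // "function"
console.log(typeof obj);           // "object"
console.log(typeof arr);           // "object", typeof does not distinguish arrays from plain objects
console.log(typeof null);          // "object"
console.log(typeof undefined);     // "undefined"
console.log(arr instanceof Array); // true, instanceof walks the prototype chain instead
console.log(fn);                   // prints the function object itself
console.log(fn());                 // 42, the call operator () actually invokes it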
node express multi-process (cluster)
We know that standard JavaScript is single-threaded and cannot truly use a multi-core CPU. The old approach was to start multiple copies of the same service (or different services) on different ports.
For example, to run one service across multiple cores you had to script its startup on several ports and then distribute requests through a load balancer, which is fairly complex to set up.
The cluster API lets a single program make full use of a multi-core CPU. Let's walk through the steps.
The two modules used below, cluster and os, are built into Node.js core, so no separate npm install is required; require("cluster") and require("os") work out of the box.
Here is the implementation:
var cluster = require("cluster");
var http = require("http");
var numCPUs = require("os").cpus().length;
if (cluster.isMaster) {
// Fork workers.
for (var i = 0; i < numCPUs; i++) {
cluster.fork();
}
cluster.on("exit", function(worker, code, signal) {
console.log("worker " + worker.process.pid + " died");
});
} else {
// Workers can share any TCP connection
// In this case it is an HTTP server
http
.createServer(function(req, res) {
res.writeHead(200);
res.end("hello world\n");
})
.listen(3000);
}
cluster shares one port across the worker processes; when an external request arrives, cluster hands it off to one of the workers.
The mechanism is a bit like Nginx: a fixed pool of workers (usually one per physical CPU core) handles all requests, rather than creating a thread per request the way Tomcat does.
When the program starts, cluster.isMaster is true and execution enters the first if block. Each call to cluster.fork() spawns a worker process that re-runs the same script; inside a worker, cluster.isMaster is false. Note the loop, which creates several workers in one go.
The next example shows how to use cluster in an Express application.
Express starts its HTTP server from the www script in the bin folder, so enabling cluster is just a matter of editing that script.
app/bin/www: change the script's contents to
#!/usr/bin/env node
var app = require("../app");
var debug = require("debug")("app:server");
var http = require("http");
var cluster = require("cluster");
var numCPUs = require("os").cpus().length;
if (cluster.isMaster) {
console.log("[master] " + "start master...");
for (var i = 0; i < numCPUs; i++) {
cluster.fork();
}
cluster.on("listening", function(worker, address) {
console.log(
"[master] " +
"listening: worker" +
worker.id +
",pid:" +
worker.process.pid +
", Address:" +
address.address +
":" +
address.port
);
});
} else if (cluster.isWorker) {
console.log("[worker] " + "start worker ..." + cluster.worker.id);
var server = http.createServer(app);
server.listen(3000);
server.on("error", onError);
server.on("listening", onListening);
}
var port = normalizePort(process.env.PORT || "3000");
app.set("port", port);
function normalizePort(val) {
var port = parseInt(val, 10);
if (isNaN(port)) {
// named pipe
return val;
}
if (port >= 0) {
// port number
return port;
}
return false;
}
function onError(error) {
if (error.syscall !== "listen") {
throw error;
}
var bind = typeof port === "string" ? "Pipe " + port : "Port " + port;
// handle specific listen errors with friendly messages
switch (error.code) {
case "EACCES":
console.error(bind + " requires elevated privileges");
process.exit(1);
break;
case "EADDRINUSE":
console.error(bind + " is already in use");
process.exit(1);
break;
default:
throw error;
}
}
function onListening() {
var addr = server.address();
var bind = typeof addr === "string" ? "pipe " + addr : "port " + addr.port;
debug("Listening on " + bind);
}
Getting started with Minio (2)
In the previous part we created a bucket, but did you notice one detail: we were using http.
In real projects you usually need to go through https.
Here we set use_https to True:
lizhedeMacBook-Pro:~ lizhe$ cat ~/.s3cfg
# Setup endpoint
host_base = localhost:9090
host_bucket = localhost:9090
bucket_location = us-east-1
use_https = True
# Setup access keys
access_key = admin
secret_key = admin123456
# Enable S3 v4 signature APIs
signature_v2 = False
lizhedeMacBook-Pro:~ lizhe$
Trying to create another bucket now fails with an error:
lizhedeMacBook-Pro:~ lizhe$ s3cmd mb s3://mybuckethttps
ERROR: SSL certificate verification failure: [SSL: UNKNOWN_PROTOCOL] unknown protocol (_ssl.c:590)
lizhedeMacBook-Pro:~ lizhe$
To make minio support https, we first need to generate a local certificate. To keep things simple we use Go's generate_cert.go example:
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Generate a self-signed X.509 certificate for a TLS server. Outputs to
// 'cert.pem' and 'key.pem' and will overwrite existing files.
package main
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"flag"
"fmt"
"log"
"math/big"
"net"
"os"
"strings"
"time"
)
var (
host = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for")
validFrom = flag.String("start-date", "", "Creation date formatted as Jan 1 15:04:05 2011")
validFor = flag.Duration("duration", 365*24*time.Hour, "Duration that certificate is valid for")
isCA = flag.Bool("ca", false, "whether this cert should be its own Certificate Authority")
rsaBits = flag.Int("rsa-bits", 2048, "Size of RSA key to generate. Ignored if --ecdsa-curve is set")
ecdsaCurve = flag.String("ecdsa-curve", "", "ECDSA curve to use to generate a key. Valid values are P224, P256 (recommended), P384, P521")
)
func publicKey(priv interface{}) interface{} {
switch k := priv.(type) {
case *rsa.PrivateKey:
return &k.PublicKey
case *ecdsa.PrivateKey:
return &k.PublicKey
default:
return nil
}
}
func pemBlockForKey(priv interface{}) *pem.Block {
switch k := priv.(type) {
case *rsa.PrivateKey:
return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}
case *ecdsa.PrivateKey:
b, err := x509.MarshalECPrivateKey(k)
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to marshal ECDSA private key: %v", err)
os.Exit(2)
}
return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
default:
return nil
}
}
func main() {
flag.Parse()
if len(*host) == 0 {
log.Fatalf("Missing required --host parameter")
}
var priv interface{}
var err error
switch *ecdsaCurve {
case "":
priv, err = rsa.GenerateKey(rand.Reader, *rsaBits)
case "P224":
priv, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
case "P256":
priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
case "P384":
priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
case "P521":
priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
default:
fmt.Fprintf(os.Stderr, "Unrecognized elliptic curve: %q", *ecdsaCurve)
os.Exit(1)
}
if err != nil {
log.Fatalf("failed to generate private key: %s", err)
}
var notBefore time.Time
if len(*validFrom) == 0 {
notBefore = time.Now()
} else {
notBefore, err = time.Parse("Jan 2 15:04:05 2006", *validFrom)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to parse creation date: %s\n", err)
os.Exit(1)
}
}
notAfter := notBefore.Add(*validFor)
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
log.Fatalf("failed to generate serial number: %s", err)
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{"Acme Co"},
},
NotBefore: notBefore,
NotAfter: notAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
hosts := strings.Split(*host, ",")
for _, h := range hosts {
if ip := net.ParseIP(h); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, h)
}
}
if *isCA {
template.IsCA = true
template.KeyUsage |= x509.KeyUsageCertSign
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv)
if err != nil {
log.Fatalf("Failed to create certificate: %s", err)
}
certOut, err := os.Create("cert.pem")
if err != nil {
log.Fatalf("failed to open cert.pem for writing: %s", err)
}
if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
log.Fatalf("failed to write data to cert.pem: %s", err)
}
if err := certOut.Close(); err != nil {
log.Fatalf("error closing cert.pem: %s", err)
}
log.Print("wrote cert.pem\n")
keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
log.Print("failed to open key.pem for writing:", err)
return
}
if err := pem.Encode(keyOut, pemBlockForKey(priv)); err != nil {
log.Fatalf("failed to write data to key.pem: %s", err)
}
if err := keyOut.Close(); err != nil {
log.Fatalf("error closing key.pem: %s", err)
}
log.Print("wrote key.pem\n")
}
Here cert.pem is the public certificate and key.pem is the private key; minio only supports the pem format.
For the minio server to serve https, the certificate and key need to be placed in
~/.minio/certs
You can either mount a host path straight into the docker container or copy the files in; here I go with copying.
lizhedeMacBook-Pro:ca lizhe$ pwd
/Users/lizhe/Documents/DEV/projects/golang/ca
lizhedeMacBook-Pro:ca lizhe$
lizhedeMacBook-Pro:ca lizhe$ go run generate_cert.go -ca --host "localhost"
2019/03/11 16:49:57 wrote cert.pem
2019/03/11 16:49:57 wrote key.pem
lizhedeMacBook-Pro:ca lizhe$
lizhedeMacBook-Pro:ca lizhe$ cp key.pem private.key
lizhedeMacBook-Pro:ca lizhe$ cp cert.pem public.crt
lizhedeMacBook-Pro:ca lizhe$
lizhedeMacBook-Pro:ca lizhe$ ls
cert.pem generate_cert.go key.pem private.key public.crt
lizhedeMacBook-Pro:ca lizhe$
lizhedeMacBook-Pro:ca lizhe$ docker run -p 9090:9000 --name studyminio -d -e MINIO_ACCESS_KEY=admin -e MINIO_SECRET_KEY=admin123456 minio/minio server /data
394e06ba0e040693919c6b568342fbf1f54b2c5fb51288a845b1116a11d70ef3
lizhedeMacBook-Pro:ca lizhe$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
394e06ba0e04 minio/minio "/usr/bin/docker-ent…" 2 seconds ago Up 1 second (health: starting) 0.0.0.0:9090->9000/tcp studyminio
68cafdfd8d42 portainer/portainer "/portainer" 3 weeks ago Up 5 hours 0.0.0.0:9000->9000/tcp portainer
lizhedeMacBook-Pro:ca lizhe$
lizhedeMacBook-Pro:ca lizhe$ docker cp private.key 394e06ba0e04:/root/.minio/certs
lizhedeMacBook-Pro:ca lizhe$ docker cp public.crt 394e06ba0e04:/root/.minio/certs
lizhedeMacBook-Pro:ca lizhe$
lizhedeMacBook-Pro:ca lizhe$ docker restart studyminio
studyminio
lizhedeMacBook-Pro:ca lizhe$
NOTE: Location of custom certs directory can be specified using --certs-dir
command line option.
After that, the minio server can be reached at https://localhost:9090/.
And of course s3cmd can be used again as well:
lizhedeMacBook-Pro:ca lizhe$ cd /Users/lizhe/Documents/DEV/projects/golang/ca
lizhedeMacBook-Pro:ca lizhe$ ls
cert.pem generate_cert.go key.pem private.key public.crt
lizhedeMacBook-Pro:ca lizhe$ cat ~/.s3cfg
# Setup endpoint
host_base = localhost:9090
host_bucket = localhost:9090
bucket_location = us-east-1
use_https = True
# Setup access keys
access_key = admin
secret_key = admin123456
# Enable S3 v4 signature APIs
signature_v2 = False
lizhedeMacBook-Pro:ca lizhe$ s3cmd mb --no-check-certificate s3://mybucket1
Bucket 's3://mybucket1/' created
lizhedeMacBook-Pro:ca lizhe$ s3cmd mb --ca-certs ./public.crt s3://mybucket2
Bucket 's3://mybucket2/' created
lizhedeMacBook-Pro:ca lizhe$ cd /
lizhedeMacBook-Pro:/ lizhe$ s3cmd mb --no-check-certificate s3://mybucket3
Bucket 's3://mybucket3/' created
lizhedeMacBook-Pro:/ lizhe$
https://docs.minio.io/docs/how-to-secure-access-to-minio-server-with-tls
https://github.com/minio/minio/tree/master/docs/tls/kubernetes
Getting started with Minio (1)
First, some background. I first heard of Minio on a company project built on AWS and Kubernetes: the production environment was a Kubernetes cluster running on AWS, but the local development environment used docker swarm and minio.
AWS, Kubernetes and docker swarm need no introduction here, so what exactly is minio?
It turns out the project's images were stored in S3 buckets in production. In local development it was impractical (or perhaps just too expensive) to give everyone their own S3 bucket, so we used a substitute:
minio, which is compatible with the Amazon S3 API. Since the API is compatible, the same code can be used against both S3 and minio.
minio is easy to install via docker; it listens on port 9000, and the data location is passed as a command-line argument.
In my case port 9000 is already taken by portainer, so I map minio's port to 9090.
Note that -e MINIO_ACCESS_KEY=admin -e MINIO_SECRET_KEY=admin123456 sets the initial access key and secret key.
The access key must be at least 3 characters long and the secret key between 8 and 40 characters:
Access key length should be between minimum 3 characters in length.
Secret key should be in between 8 and 40 characters.
lizhedeMacBook-Pro:study lizhe$ docker images | grep minio/minio
minio/minio latest a3e496686886 3 weeks ago 41.2MB
lizhedeMacBook-Pro:study lizhe$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
68cafdfd8d42 portainer/portainer "/portainer" 3 weeks ago Up 3 hours 0.0.0.0:9000->9000/tcp portainer
lizhedeMacBook-Pro:study lizhe$ docker run -p 9090:9000 --name studyminio -d -e MINIO_ACCESS_KEY=admin -e MINIO_SECRET_KEY=admin123456 minio/minio server /data
0215e71faa7e308ed175fe3e69dde784687b715210deed79fe6416ec195d2c59
lizhedeMacBook-Pro:study lizhe$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
0215e71faa7e minio/minio "/usr/bin/docker-ent…" 5 seconds ago Up 4 seconds (health: starting) 0.0.0.0:9090->9000/tcp studyminio
68cafdfd8d42 portainer/portainer "/portainer" 3 weeks ago Up 3 hours 0.0.0.0:9000->9000/tcp portainer
lizhedeMacBook-Pro:study lizhe$
I won't go through the pros and cons of object storage one by one here; roughly speaking, it is more flexible than block storage (mapping an entire disk) and file storage, easier to front with a CDN, and easier to deploy and keep highly available in cloud environments.
Next, let's look at how to work with it using S3cmd.
S3cmd is a CLI client for managing data in AWS S3, Google Cloud Storage or any cloud storage service provider that uses the s3 protocol. S3cmd is open source and is distributed under the GPLv2 license.
Since my local environment is a Mac, I chose to install it with pip; first install pip itself:
lizhedeMacBook-Pro:~ lizhe$ sudo easy_install pip
Then install s3cmd:
sudo pip install s3cmd
Unfortunately, the command above from the official documentation did not install s3cmd cleanly on macOS; it fails with an error:
error: could not create '/System/Library/Frameworks/Python.framework/Versions/2.7/share': Operation not permitted
This is because El Capitan introduced System Integrity Protection (SIP), so file and directory operations by older versions of pip are refused, even when running as root.
The fix is to add the --user flag:
sudo pip install s3cmd --user
Then add s3cmd to the PATH:
sudo vi ~/.bash_profile
lizhedeMacBook-Pro:~ lizhe$ cat ~/.bash_profile
VAGRANT_HOME="/Users/lizhe/Documents/vagrant_workspace/vagrant_home"
export HOMEBREW_BOTTLE_DOMAIN=https://mirrors.ustc.edu.cn/homebrew-bottles
export JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1.8.0_171.jdk/Contents/Home
export PATH=$JAVA_HOME/bin:$PATH
export PATH=/Users/lizhe/Library/Python/2.7/bin:$PATH
source ~/.bash_profile
sudo chmod +x /Users/lizhe/Library/Python/2.7/bin/s3cmd
Create a configuration file ~/.s3cfg:
lizhedeMacBook-Pro:~ lizhe$ cat ~/.s3cfg
# Setup endpoint
host_base = localhost:9090
host_bucket = localhost:9090
bucket_location = us-east-1
use_https = False
# Setup access keys
access_key = admin
secret_key = admin123456
# Enable S3 v4 signature APIs
signature_v2 = False
lizhedeMacBook-Pro:~ lizhe$
Try creating a bucket:
lizhedeMacBook-Pro:~ lizhe$ s3cmd mb s3://mybucket
Bucket 's3://mybucket/' created
You can see that the bucket has been created.
Jenkins + selenium + pytest + allure automated testing
This article describes how to run automated tests with jenkins, selenium, pytest and allure, and how to generate an allure report.
As we know, selenium needs a driver and a Chrome build whose versions are mutually compatible; mismatched versions cause all kinds of odd problems.
So here I use the joyzoursky/python-chromedriver:3.7-selenium image as the base, but that image does not have allure installed.
Using it as a base, I built my own image, ibaibai/selenium-runner.
Jenkinsfile
def label = "mypod-${UUID.randomUUID().toString()}"
podTemplate(label: label, yaml: """
kind: Pod
metadata:
  name: selenium-runner
spec:
  imagePullSecrets:
    - name: regcred
  containers:
    - name: selenium-runner
      image: libaibai/selenium-runner
      imagePullPolicy: Always
      command:
        - /bin/cat
      tty: true
      volumeMounts:
        - name: jenkins-docker-cfg
          mountPath: /root
      securityContext:
        privileged: false
  volumes:
    - name: jenkins-docker-cfg
      projected:
        sources:
          - secret:
              name: regcred
              items:
                - key: .dockerconfigjson
                  path: .docker/config.json
"""
) {
node(label) {
stage('Calling selenium') {
sh label: '', script: 'free -m'
timeout(time: 1, unit: 'HOURS'){
dir('workdir') {
git credentialsId: '1b09346a-ac0b-4e71-b9c3-2e73ef43dbc0', url: 'https://lizhe@lizhe.name/autotest.git'
}
}
container(name: 'selenium-runner', shell: '/bin/sh') {
catchError {
sh label: '', script: ' sh -c set +e; pytest /home/jenkins/workspace/testtires/workdir/pytestpoc/hello.py --alluredir allure-results ; set -e'
}
sh label: '', script: ' whoami'
sh label: '', script: ' chmod -R 777 /home/jenkins/workspace/testtires/allure-results'
allure includeProperties: false, jdk: '', results: [[path: 'allure-results']]
}
sh label: '', script: 'free -m'
}
}
}
There isn't much to tweak in this script; the main thing is the call to
allure includeProperties: false, jdk: '', results: [[path: 'allure-results']]
which generates the allure report. Note that the results directory allure reads here is allure-results;
it must match the results path that the pytest run writes to:
hello.py --alluredir allure-results
hello.py
import allure
import pytest
import os, sys
from selenium import webdriver
import time
@allure.feature('test_module_01')
@allure.story('test_story_01')
def test_case_01():
    option = webdriver.ChromeOptions()
    option.add_argument("headless")
    option.add_argument("--no-sandbox")
    driver = webdriver.Chrome(chrome_options=option)
    # driver = webdriver.Chrome()
    driver.get("https://lizhe.com/?site=abc")
    time.sleep(10)
    print(driver.title)
    driver.get("https://lizhe.com/abc/zh/Root/Tire/c/50000000")
    tires = driver.find_elements_by_class_name("product__listing--price")
    print(len(tires))
    for tire in tires:
        print(tire.text)
    driver.quit()
    assert len(tires) != 0
@allure.feature('test_module_01')
@allure.story('test_story_02')
def test_case_02():
    assert 0 == 1
@allure.feature('test_module_01')
@allure.story('test_story_03')
def test_case_03():
    assert 0 == 0
if __name__ == '__main__':
    pytest.main(['-s', '-q', '--alluredir', 'allure-results'])
allure.issues.tracker.pattern
http://tracker.company.com/%s
CircleCI build golang docker image
version: 2
jobs:
  build:
    docker:
      - image: golang:alpine3.12
        auth:
          username: libaibai
          password: $password
    steps:
      - checkout
      - setup_remote_docker:
          version: 19.03.13
          docker_layer_caching: false
      - run:
          name: Install Docker client
          command: apk add docker-cli
      - run: |
          TAG=0.1
          docker build -t libaibai/circleci-demo-docker:v1 .
          echo $password | docker login -u libaibai --password-stdin
          docker push libaibai/circleci-demo-docker:v1
Github build golang docker image
name: Docker Image CI
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Build and Push Docker Image
        uses: docker/build-push-action@v1
        with:
          username: ${{ secrets.username }}
          password: ${{ secrets.password }}
          repository: ${{ secrets.username }}/gohellobuild
          tag_with_sha: true