Friday, November 13, 2020

Verify that a cert and private key created using the EC algorithm form a pair

 

Retrieve the public key from the cert and compute its md5

openssl x509 -in server.crt -pubkey -noout | openssl md5

 

Retrieve the public key from the private key and compute its md5

openssl pkey -pubout -in server.key | openssl md5


If the outputs of the two commands match, the certificate was created from that private key, since both carry the same public key.
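
As a small convenience, the two checks can be wrapped into one shell snippet (a minimal sketch, assuming the files are named server.crt and server.key as above):

cert_md5=$(openssl x509 -in server.crt -pubkey -noout | openssl md5)
key_md5=$(openssl pkey -pubout -in server.key | openssl md5)
if [ "$cert_md5" = "$key_md5" ]; then
  echo "certificate and private key match"
else
  echo "certificate and private key do NOT match"
fi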

Thursday, October 15, 2020

Hyperledger Fabric event listener

 Function to listen for chaincode events

// Assumes the fabric-sdk-go packages (fabsdk, config, event, seek) are imported
// and that channelID, orgAdmin and orgName are defined elsewhere in the file.
func listenEvents() {
    cnfg := config.FromFile("./connection.json")
    fmt.Println(reflect.TypeOf(cnfg))
    sdk, err := fabsdk.New(cnfg)
    if err != nil {
        fmt.Printf("Failed to create new SDK: %s", err)
        return
    }
    defer sdk.Close()

    clientChannelContext := sdk.ChannelContext(channelID, fabsdk.WithUser(orgAdmin), fabsdk.WithOrg(orgName))
    eventClient, err := event.New(clientChannelContext, event.WithBlockEvents(),
        event.WithSeekType(seek.Oldest))
    if err != nil {
        fmt.Printf("Failed to create new event client: %s", err)
        return
    }

    // Register for events emitted by the "offering" chaincode whose event name
    // matches "OFFERING SUBMITTED"
    reg, notifier, err := eventClient.RegisterChaincodeEvent("offering", "OFFERING SUBMITTED")
    if err != nil {
        fmt.Printf("Failed to register chaincode event: %s", err)
        return
    }
    defer eventClient.Unregister(reg)

    donechannel := make(chan string)
    go func() {
        // Just a function to demonstrate that we can send a signal to end the listening loop
        time.Sleep(1 * time.Minute)
        donechannel <- "QUIT NOW"
    }()
    defer close(donechannel)

end:
    for {
        select {
        case ccEvent := <-notifier:
            fmt.Printf("Received cc event: %#v", ccEvent)
            fmt.Printf("The payload is %v", ccEvent.Payload)
            fmt.Printf("TxID is %s", ccEvent.TxID)
        case msg := <-donechannel:
            fmt.Println(msg)
            break end
        }
    }
    fmt.Println("Process finished")
}

Wednesday, October 14, 2020

Query Fabric transaction

 

// Assumes the fabric-sdk-go packages (fabsdk, config, ledger) are imported
// and that channelID, orgAdmin and orgName are defined elsewhere in the file.
func useClientQuery() {
    cnfg := config.FromFile("./connection.json")
    fmt.Println(reflect.TypeOf(cnfg))
    sdk, err := fabsdk.New(cnfg)
    if err != nil {
        fmt.Printf("Failed to create new SDK: %s", err)
        return
    }
    defer sdk.Close()

    clientChannelContext := sdk.ChannelContext(channelID, fabsdk.WithUser(orgAdmin), fabsdk.WithOrg(orgName))
    client, err := ledger.New(clientChannelContext)
    if err != nil {
        fmt.Printf("Can not get ledger client: %s", err)
        return
    }
    fmt.Printf("The client is %v", client)

    // Query a transaction by its ID
    tx, err := client.QueryTransaction("e969096e24e768bff2df17d640404b6b72093fa008509acaa31cc8adb6c72260")
    if err != nil {
        fmt.Printf("Failed to query transaction: %s", err)
        return
    }
    fmt.Printf("validation code %d", tx.GetValidationCode())
    fmt.Printf("The tx is %v", tx)
    //client.QueryBlock()
    //client.QueryBlockByTxID("e969096e24e768bff2df17d640404b6b72093fa008509acaa31cc8adb6c72260")
}


Friday, September 11, 2020

Zero knowledge proof non-interactive

How to make zero-knowledge proofs non-interactive?

 

With earlier zero-knowledge verification systems there was one big problem. For it to work, the prover and the verifier had to be online at the same time. In other words, the process was “interactive”. This made the entire system inefficient and almost impossible to scale up. The verifiers couldn’t possibly be online at the same time as the provers all the time. There needed to be a system to make this more efficient.

In 1986, Fiat and Shamir invented the Fiat-Shamir heuristic and successfully turned the interactive zero-knowledge proof into a non-interactive zero-knowledge proof. This let the entire protocol work without any interaction. The procedure behind it is very simple.

So, to give you an example, this is how zero-knowledge proofs used to work before Fiat and Shamir. Let’s demonstrate this using simple discrete logarithms.

 

  • Anna wants to prove to Carl that she knows a value x such that y = g^x to a base g.

 

  • Anna picks a random value v from a set of values Z, and computes t = g^v and sends t to Carl.

 

  • Carl picks a random value c from the set Z and sends it to Anna.

 

  • Anna computes r = v-c*x and returns r to Carl.

 

  • Carl checks whether t = g^r * y^c holds (since r = v - c*x and y = g^x, by simple substitution g^(v-c*x) * g^(c*x) = g^v = t).

 

  • Carl doesn’t know the value of x, but by merely checking whether t = g^r * y^c holds he can verify that Anna does indeed know the value of x.

 

Now while the above interaction is zero-knowledge, the problem with this is that Anna and Carl need to be online and exchanging values for it to work.

How can Anna prove to Carl that she has knowledge of something without Carl being online? She can do so by using a simple cryptographic hash function, as Fiat and Shamir theorized.

 

Let’s look at how the example above would work in a non-interactive way:

 

  • Anna wants to prove to Carl that she knows a value x such that y = g^x to a base g.

 

  • Anna picks a random value v from a set of values Z, and computes t = g^v.

 

  • Anna computes c = H(g,y,t) where H() is a hash function.

 

  • Anna computes r = v - c*x.

 

  • Carl or anyone can then check if t = g^r * y^c.

 

So, as you can see, zero-knowledge proofs were made non-interactive. And this is what laid the foundation for zk-SNARKs.
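
As a toy illustration only (the tiny group p = 23, g = 5 and the secret x = 6 below are made-up, insecure values), the non-interactive flow can be sketched in a short shell script, with the challenge c derived from a hash instead of coming from Carl:

#!/bin/bash
p=23; g=5; q=22                 # toy public parameters; q = p-1 bounds the exponents
x=6                             # Anna's secret

modpow() {                      # modular exponentiation: modpow base exp mod
  local b=$(( $1 % $3 )) e=$2 m=$3 r=1
  while (( e > 0 )); do
    (( e % 2 )) && r=$(( r * b % m ))
    e=$(( e / 2 )); b=$(( b * b % m ))
  done
  echo "$r"
}

y=$(modpow $g $x $p)            # public value y = g^x
v=$(( RANDOM % q ))             # Anna picks a random v
t=$(modpow $g $v $p)            # commitment t = g^v
h=$(printf '%s' "$g$y$t" | openssl dgst -sha256 | awk '{print $2}')
c=$(( 16#${h:0:4} % q ))        # challenge c = H(g, y, t), reduced into the exponent range
r=$(( ((v - c * x) % q + q) % q ))   # response r = v - c*x (mod q)

# Anyone can now verify (t, c, r) without talking to Anna:
lhs=$(( $(modpow $g $r $p) * $(modpow $y $c $p) % p ))
[ "$lhs" -eq "$t" ] && echo "proof verifies: g^r * y^c = t" || echo "proof failed"

Because c is recomputed from (g, y, t) by whoever verifies, Carl never needs to be online.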

 

The above content is taken from this link https://blockgeeks.com/guides/zcash/

Thursday, September 10, 2020

Create self-signed certificates

 ################## Create root ca certificate
# Create a private key
openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:P-256 \
  -pkeyopt ec_param_enc:named_curve -out ca.key

# Extract the public key
openssl ec -in ca.key -pubout -out ca_public.key

# Create signing certificate in one step
# Create root ca certificate
openssl req -new -days 3650 -nodes -x509 -extensions v3_req -extensions v3_ca \
  -subj "/C=US/ST=North Carolina/L=Raleigh/O=org0.example.com/CN=ca1.org0.example.com" \
  -addext "keyUsage=critical,digitalSignature,keyEncipherment,keyCertSign,cRLSign" \
  -addext "extendedKeyUsage=serverAuth,clientAuth" \
  -addext "subjectAltName=IP.1:192.168.56.32" -key ca.key  \
  -out ca.crt

# Inspect the certificate
openssl x509 -noout -text -in ca.crt

################ Create User certificate
# Create private key for admin
openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:P-256 \
  -pkeyopt ec_param_enc:named_curve -out admin.key

# Extract public key for admin
openssl ec -in admin.key -pubout -out admin_public.key

# Create admin CSR
openssl req -new -key admin.key -extensions v3_req \
  -subj "/C=US/ST=North Carolina/L=Raleigh/OU=admin/OU=client/CN=Admin@org0.example.com" \
  -out admin.csr

# Verify CSR
openssl req -verify -text -noout -in admin.csr

# The content of v3.ext file
# keyUsage = critical,digitalSignature
# basicConstraints = critical,CA:FALSE
# authorityKeyIdentifier = keyid,issuer

# key usage can be other values as well
# keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
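
# A minimal sketch of creating the extension file described above before signing
# (the file name v3.ext matches what the signing command below expects):
cat > v3.ext <<'EOF'
keyUsage = critical,digitalSignature
basicConstraints = critical,CA:FALSE
authorityKeyIdentifier = keyid,issuer
EOF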

# Now sign the CSR with ca key and cert
openssl x509 -req -days 3560 -extfile v3.ext -in admin.csr -CA ca.crt -CAkey ca.key \
  -CAcreateserial -sha256 -out admin.crt

# Verify certificate
openssl x509 -noout -text -in admin.crt

################ Create peer and orderer certificate
# Create private key for peer1
openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:P-256 \
  -pkeyopt ec_param_enc:named_curve -out peer1.key

# Extract public key for peer1
openssl ec -in peer1.key -pubout -out peer1_public.key

# Create peer1 CSR
openssl req -new -key peer1.key -extensions v3_req \
  -subj "/C=US/ST=North Carolina/L=Raleigh/OU=peer/CN=peer1.org0.example.com" \
  -out peer1.csr

# Verify CSR
openssl req -verify -text -noout -in peer1.csr

# The content of v3.ext file
# keyUsage = critical,digitalSignature
# basicConstraints = critical,CA:FALSE
# authorityKeyIdentifier = keyid,issuer

# key usage can be other values as well
# keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment

# Now sign the CSR with ca key and cert
openssl x509 -req -days 3560 -extfile v3.ext -in peer1.csr -CA ca.crt -CAkey ca.key \
  -CAcreateserial -sha256 -out peer1.crt

# Verify certificate
openssl x509 -noout -text -in peer1.crt

Friday, August 14, 2020

Windows 10 docker desktop volumes

 Windows 10 Docker Desktop made a lot of changes between WSL and WSL2, especially in how mounts work. This change created a lot of issues. Here is the main difference:


WSL:

the mount points start like this:

on drive C: /host_mnt/c

on drive D: /host_mnt/d


WSL2:

the mount points start like this:

on drive C: /run/desktop/mnt/host/c

on drive D: /run/desktop/mnt/host/d


Wednesday, July 15, 2020

certificates key usage and extended key usage

Key usage and extended key usage

Key usage extensions define the purpose of the public key contained in a certificate. You can use them to restrict the public key to as few or as many operations as needed. For example, if you have a key used only for signing or verifying a signature, enable the digital signature and/or non-repudiation extensions. Alternatively, if a key is used only for key management, enable key encipherment.


Key usage

The following table describes the key usage extensions available for certificates created using the CA process.
Note: The digital signature and data encipherment key usage extensions are enabled by default for all Internet certificates.
Table 1. Key usage extensions

Digital signature: Use when the public key is used with a digital signature mechanism to support security services other than non-repudiation, certificate signing, or CRL signing. A digital signature is often used for entity authentication and data origin authentication with integrity.

Non-repudiation: Use when the public key is used to verify digital signatures that provide a non-repudiation service. Non-repudiation protects against the signing entity falsely denying some action (excluding certificate or CRL signing).

Key encipherment: Use when a certificate will be used with a protocol that encrypts keys. An example is S/MIME enveloping, where a fast (symmetric) key is encrypted with the public key from the certificate. The SSL protocol also performs key encipherment.

Data encipherment: Use when the public key is used for encrypting user data other than cryptographic keys.

Key agreement: Use when the sender and receiver of the public key need to derive a key without using encryption. This key can then be used to encrypt messages between the sender and receiver. Key agreement is typically used with Diffie-Hellman ciphers.

Certificate signing: Use when the subject public key is used to verify a signature on certificates. This extension can be used only in CA certificates.

CRL signing: Use when the subject public key is used to verify a signature on revocation information, such as a CRL.

Encipher only: Use only when key agreement is also enabled. This enables the public key to be used only for enciphering data while performing key agreement.

Decipher only: Use only when key agreement is also enabled. This enables the public key to be used only for deciphering data while performing key agreement.

Extended key usage

Extended key usage further refines key usage extensions. An extended key is either critical or non-critical. If the extension is critical, the certificate must be used only for the indicated purpose or purposes. If the certificate is used for another purpose, it is in violation of the CA's policy.
If the extension is non-critical, it indicates the intended purpose or purposes of the key and may be used in finding the correct key/certificate of an entity that has multiple keys/certificates. The extension is then only an informational field and does not imply that the CA restricts use of the key to the purpose indicated. Nevertheless, applications that use certificates may require that a particular purpose be indicated in order for the certificate to be acceptable.
If a certificate contains both a critical key usage field and a critical extended key usage field, both fields must be processed independently, and the certificate be used only for a purpose consistent with both fields. If there is no purpose consistent with both fields, the certificate must not be used for any purpose.
Table 2. Extended key usage (and the key usage extensions to enable for each)

TLS Web server authentication: Digital signature, key encipherment or key agreement
TLS Web client authentication: Digital signature and/or key agreement
Sign (downloadable) executable code: Digital signature
Email protection: Digital signature, non-repudiation, and/or key encipherment or key agreement
IPSEC End System (host or router): Digital signature and/or key encipherment or key agreement
IPSEC Tunnel: Digital signature and/or key encipherment or key agreement
IPSEC User: Digital signature and/or key encipherment or key agreement
Timestamping: Digital signature, non-repudiation
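
To relate the tables back to the OpenSSL commands used earlier in this blog, a certificate intended for TLS web server and client authentication could carry these extensions (a sketch reusing the -addext flags shown in the self-signed certificate post; key and subject names are placeholders):

openssl req -new -x509 -key server.key \
  -addext "keyUsage=critical,digitalSignature,keyEncipherment" \
  -addext "extendedKeyUsage=serverAuth,clientAuth" \
  -subj "/CN=server.example.com" -out server.crt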

Monday, July 13, 2020

Process when changes are made to operator data types

When changes are made to the operator data types, one will need to run the following commands:

0. setup GOPATH and GOROOT correctly first.
1. operator-sdk generate k8s
2. operator-sdk generate openapi
3. operator-sdk build <imagename>
4. operator-sdk up local

The above commands are based on operator-sdk prior to v0.12.0. Newer operator-sdk versions removed the openapi generator, so you will need to download the openapi generator separately for that step.

Wednesday, June 17, 2020

Ingress services on IBM Cloud

TCP ports (tcp-ports)

Access an app via a non-standard TCP port.

Description: Use this annotation for an app that runs a TCP streams workload. The ALB operates in pass-through mode and forwards traffic to back-end apps. SSL termination is not supported in this case. The TLS connection is not terminated and passes through untouched.

The comment below was added by Tong:
If this is indeed the case, then this should be the ideal setup for Fabric nodes, since it is desirable for the traffic to pass untouched between the client and the Fabric nodes.


Here is another section about TLS using ALB on IBM Cloud.

Step 4: Select TLS termination

After you map your custom domain, choose whether to use TLS termination.
The ALB load balances HTTP network traffic to the apps in your cluster. To also load balance incoming HTTPS connections, you can configure the ALB to decrypt the network traffic and forward the decrypted request to the apps that are exposed in your cluster.

SSL services support (ssl-services)

Allow HTTPS requests and encrypt traffic to your upstream apps.
Description: When your Ingress resource configuration has a TLS section, the Ingress ALB can handle HTTPS-secured URL requests to your app. By default, the ALB terminates the TLS connection and decrypts the request before using the HTTP protocol to forward the traffic to your apps. If you have apps that require the HTTPS protocol and need traffic to be encrypted, use the ssl-services annotation. With the ssl-services annotation, the ALB terminates the external TLS connection, then creates a new SSL connection between the ALB and the app pod. Traffic is re-encrypted before it is sent to the upstream pods.
If your back-end app can handle TLS and you want to add additional security, you can add one-way or mutual authentication by providing a certificate that is contained in a secret.

Tuesday, June 16, 2020

Ingress Nginx network settings

The following network settings are for a machine which hosts nginx.
sysctl -e -w fs.file-max=6000000;  
sysctl -e -w fs.nr_open=10000000;  
sysctl -e -w net.core.rmem_max=16777216;  
sysctl -e -w net.core.wmem_max=16777216;  
sysctl -e -w net.core.rmem_default=12582912;  
sysctl -e -w net.core.wmem_default=12582912;  
sysctl -e -w net.core.optmem_max=25165824;  
sysctl -e -w net.core.netdev_max_backlog=262144;  
sysctl -e -w net.core.somaxconn=32768;  
sysctl -e -w net.core.rps_sock_flow_entries=32768;  
sysctl -e -w net.ipv4.ip_local_port_range="1025 65535";  
sysctl -e -w net.ipv4.tcp_rmem="8192 262144 16777216";  
sysctl -e -w net.ipv4.tcp_wmem="8192 262144 16777216";  
sysctl -e -w net.ipv4.udp_rmem_min=16384;  
sysctl -e -w net.ipv4.udp_wmem_min=16384;  
sysctl -e -w net.ipv4.ip_no_pmtu_disc=0;  
sysctl -e -w net.ipv4.route.flush=1;  
sysctl -e -w net.ipv4.tcp_dsack=1;  
sysctl -e -w net.ipv4.tcp_sack=1;  
sysctl -e -w net.ipv4.tcp_fack=1;  
sysctl -e -w net.ipv4.tcp_max_tw_buckets=1440000;  
sysctl -e -w net.ipv4.tcp_tw_recycle=0;  
sysctl -e -w net.ipv4.tcp_tw_reuse=1;  
sysctl -e -w net.ipv4.tcp_frto=0;  
sysctl -e -w net.ipv4.tcp_syncookies=1;  
sysctl -e -w net.ipv4.tcp_max_syn_backlog=32768;  
sysctl -e -w net.ipv4.tcp_synack_retries=2;  
sysctl -e -w net.ipv4.tcp_syn_retries=3;  
sysctl -e -w net.ipv4.tcp_fin_timeout=5;  
sysctl -e -w net.ipv4.tcp_retries2=5;  
sysctl -e -w net.ipv4.tcp_no_metrics_save=1;  
sysctl -e -w net.ipv4.tcp_moderate_rcvbuf=1;  
sysctl -e -w net.ipv4.tcp_timestamps=1;  
sysctl -e -w net.ipv4.tcp_keepalive_time=300;  
sysctl -e -w net.ipv4.tcp_keepalive_intvl=30;  
sysctl -e -w net.ipv4.tcp_keepalive_probes=6;  
sysctl -e -w net.ipv4.tcp_slow_start_after_idle=0;  
sysctl -e -w net.ipv4.tcp_window_scaling=1;  
sysctl -e -w net.ipv4.tcp_low_latency=1;  
sysctl -e -w net.ipv4.tcp_max_orphans=262144;  
sysctl -e -w net.nf_conntrack_max=9145728;  
sysctl -e -w net.netfilter.nf_conntrack_max=9145728;  
sysctl -e -w net.netfilter.nf_conntrack_tcp_timeout_time_wait=10;  
sysctl -e -w net.netfilter.nf_conntrack_tcp_timeout_fin_wait=10;  
sysctl -e -w net.netfilter.nf_conntrack_tcp_timeout_close_wait=30;  
sysctl -e -w net.netfilter.nf_conntrack_tcp_loose=1;  
sysctl -e -w net.ipv4.tcp_rfc1337=1;
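
The settings above take effect immediately but do not survive a reboot. One way to persist them (the file name is arbitrary; only two of the values above are shown as an example) is to put the same key/value pairs into a file under /etc/sysctl.d/ and reload:

cat > /etc/sysctl.d/90-nginx-tuning.conf <<'EOF'
net.core.somaxconn = 32768
net.ipv4.tcp_max_syn_backlog = 32768
EOF
sysctl --system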

Wednesday, June 10, 2020

Create k8s load balancer with nginx backend

 
Use the following yaml file to create a load balancer service on k8s. This load
balancer service is backed by a deployment which consists only of a replica
set with 2 containers running nginx.
 
 
---
apiVersion: v1
kind: Service
metadata:
  name: my-service8080
spec:
  selector:
    app: my-nginx
  ports:
    - port: 8080
      targetPort: 80
  type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
spec:
  selector:
    matchLabels:
      app: my-nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: my-nginx
    spec:
      containers:
      - name: my-nginx
        image: nginx
        ports:
        - containerPort: 80

The above picks port 8080 to access the service, while the application actually runs on port 80. In many
other applications the container port may be a different number, but the service port can always be specified to maintain a consistent endpoint for other applications.

Once the service and the deployment are created successfully, you can use the following methods to get the URL to hit the service.

*
   kubectl get svc my-service8080

The above should return information which contains the external IP and ports; the combination of the two makes up the URL to hit the service.

**
   kubectl get svc my-service8080 -o yaml

This will return YAML output showing the port and the hostname, which also make up the URL to hit the service.
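
A quick way to verify the service end to end (assuming the cloud provider populates the load balancer status with an IP rather than a hostname):

EXTERNAL_IP=$(kubectl get svc my-service8080 \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
curl http://$EXTERNAL_IP:8080/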

Monday, June 8, 2020

How to get kubeconfig file from a k8s cluster running on IBMCloud

Once your k8s cluster is up and running, follow the steps below to get the kubeconfig file.


1. install ibmcloud CLI tools if you do not already have it
   curl -sL https://ibm.biz/idt-installer | bash

2. install ibmcloud CLI ks plugin
   ibmcloud plugin install kubernetes-service

3. login to the account
   ibmcloud login -a cloud.ibm.com -r us-south --sso
   -u <id@us.ibm.com> -p <password> -g Default

This step will ask you to get a one-time passcode by providing a URL; copy and paste the URL into a browser to get the one-time passcode and proceed. Once you provide the passcode, it will ask you to select an account to continue.

4. retrieve the kubeconfig file; the kubeconfig file should be saved in the current directory.
   ibmcloud ks cluster config --cluster <cluster-id>

Note:
* To check what plugins are available for CLI, run the following command
ibmcloud plugin repo-plugins -r 'IBM Cloud'

** The kubeconfig normally references a cert .pem file; when moving this kubeconfig file to another environment, the referenced .pem file should also be moved.
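
Once the file is downloaded, a quick sanity check (the file name below is just an example; use whatever path the command printed):

export KUBECONFIG=$(pwd)/kube-config-<cluster-name>.yaml
kubectl get nodes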

Monday, May 4, 2020

Resync with upstream git repo

To completely resync with the upstream repo, do the following:

1. git remote add upstream https://github.com/hyperledger-cicd/cello.git
2. git fetch upstream
3. git reset --hard upstream/master
4. git checkout master
5. git merge upstream/master
6. git push --force

Once this is done, your forked repo should be completely in sync with the upstream repo. The word upstream in this case is just a name for the upstream remote; it can be named anything you like. If you are not using the word upstream, make sure that the following commands use the correct remote name.
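
A quick way to confirm the remotes and the resulting state (nothing here changes the repo):

git remote -v                       # should list both origin and upstream
git log --oneline -1 upstream/master
git status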

Friday, May 1, 2020

Improve bash script with parameter

# parameter should be the component name such as api, dashboard or ansibleagent etc
api=("src/api-engine/"
     "build_image/docker/common/api-engine")
dashboard=("src/dashboard/"
     "build_image/docker/common/dashboard")
ansibleagent=("src/agent/ansible/"
     "build_image/docker/agent/ansible")


cdir=$(pwd)
cd ~/hl/src/github.com/realcello
# list the files changed between the two commits
allchanges=$(git diff --name-only HEAD~15 HEAD~20)
cd $cdir

echo "$allchanges"
hasTopChanges=$(echo "$allchanges" | grep -v '/')
if [ ! -z "$hasTopChanges" ]; then
  echo 'Has changes at the root, need to proceed'
  exit 0
fi

comp=$1
# indirectly reference the array whose name was passed in as the first parameter
compItems="${comp}[@]"
changesFound='False'
echo 'Items in '${comp}
for item in "${!compItems}"; do
  echo "   checking $item"
  if echo "${allchanges}" | grep -q "$item"
  then
    changesFound='True'
  fi
done

if [ "$changesFound" == 'True' ]; then
  echo 'Need to proceed'
else
  echo "##vso[task.complete result=Succeeded;]No changes found!"
fi
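
Assuming the script above is saved as check_component.sh (a hypothetical name), it would be invoked with the component name as its only parameter:

bash check_component.sh api
bash check_component.sh dashboard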

Script to check if changes have happened under a particular directory

api=("src/api-engine/"
     "build_image/docker/common/api-engine")
dashboard=("src/dashboard/"
     "build_image/docker/common/dashboard")
ansibleagent=("src/agent/ansible/"
     "build_image/docker/agent/ansible")

allcomps=("api" "dashboard" "ansibleagent")
declare -A OPS

cdir=$(pwd)
cd ~/hl/src/github.com/realcello
allchanges=$(git diff --name-only HEAD~1 HEAD~10)
cd $cdir

echo "$allchanges"

for value in ${allcomps[@]}; do
  comps="${value}[@]"
  echo 'Items in '${value}
  for item in "${!comps}"; do
    echo "   checking $item"
    if echo "${allchanges}" | grep -q "$item"
    then
      OPS["${value}"]='true'
    fi
  done
done

for item in "${allcomps[@]}"; do
  echo "$item"="${OPS[$item]}"
done

Tuesday, March 17, 2020

The script to start and stop wordpress using docker

<code>
#!/bin/bash
netexist=$(docker network ls --format "{{.Name}}" -f "name=rccc")
if [[ $1 == "start" ]]; then
  if [[ -z $netexist ]]; then
    docker network create rccc
  fi
  docker run -d --name mysql --network rccc \
    -v $(pwd)/data:/var/lib/mysql \
    -e MYSQL_ROOT_PASSWORD=mysecret \
    -e MYSQL_DATABASE=wordpress \
    -e MYSQL_USER=tongli -e MYSQL_PASSWORD=secret \
    mysql:latest mysqld --default-authentication-plugin=mysql_native_password

  sleep 3

  while : ; do
    res=$(docker logs mysql 2>&1 | grep 'ready for connections')
    if [[ ! -z $res ]]; then
      break
    fi
    echo 'Waiting for mysql to be ready...'
    sleep 3
  done

  docker run -d --name rcccsite --network rccc \
    -v $(pwd)/content:/var/www/html \
    -e WORDPRESS_DB_HOST=mysql \
    -e WORDPRESS_DB_USER=tongli \
    -e WORDPRESS_DB_PASSWORD=secret \
    -e WORDPRESS_DB_NAME=wordpress \
    -p 8080:80 wordpress:latest

elif [[ $1 == "stop" ]]; then
  docker rm -f rcccsite mysql
  if [[ $netexist == "rccc" ]]; then
    docker network rm rccc
  fi
fi
</code>
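
Assuming the script is saved as wp.sh (a hypothetical name) and made executable, it is driven by a single start/stop parameter:

./wp.sh start    # creates the network, starts mysql and wordpress on http://localhost:8080
./wp.sh stop     # removes both containers and the network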

Wednesday, March 11, 2020

IBM Blockchain Platform vscode extension wallet structure

File directory structure:

<root>/
    Admin/
        <hashcode>-priv
        <hashcode>-pub
        Admin -- this is a json file with the following structure
           {"name":"Admin",
             "mspid":"org0examplecom",
             "role": null, "affiliation": "",
             "enrollmentSecret": "",
             "enrollment": {
                  "signingIdentity": <hash, which used as the priv and pub name>,
                  "identity": {"certificate": "The plan new line contained certificate"}
             }
           }

Monday, March 9, 2020

New Go chaincode imports

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "math/rand"
    "time"

    "github.com/golang/protobuf/proto"
    "github.com/hyperledger/fabric-chaincode-go/shim"
    "github.com/hyperledger/fabric-protos-go/common"
    "github.com/hyperledger/fabric-protos-go/msp"
    pb "github.com/hyperledger/fabric-protos-go/peer"
)

Thursday, February 27, 2020

The real process to join an org and peer to an existing channel

1. The new org has to be added into an application channel.
2. Once the new org is part of the application channel, the new org admin can add its peers to the channel by following the below steps:

      a) Use peer channel fetch oldest to get the oldest block, which is the genesis block.
      b) Then use the genesis block to join the peer, as sketched below.
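
A sketch of step 2 using the peer CLI (the orderer address, channel name and TLS CA file are placeholders):

# a) fetch the oldest (genesis) block of the channel
peer channel fetch oldest mychannel.block -c mychannel \
  -o orderer.example.com:7050 --tls --cafile $ORDERER_CA
# b) join the peer using that block
peer channel join -b mychannel.block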

Wednesday, February 26, 2020

Disable ubuntu daily update service

sudo apt-get remove unattended-upgrades
sudo systemctl stop apt-daily.timer
sudo systemctl disable apt-daily.timer
sudo systemctl disable apt-daily.service
sudo systemctl daemon-reload

Set static IP for ubuntu servers

Edit the /etc/netplan/50-cloud-init.yaml file like the following

network:
    ethernets:
        enp0s3:
            dhcp4: false
            addresses: [192.168.56.101/24]
            nameservers:
                addresses: [8.8.8.8, 8.8.4.4]
        enp0s8:
            addresses: []
            dhcp4: true
            optional: true
    version: 2
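
After saving the file, apply the new configuration:

sudo netplan apply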

Thursday, February 20, 2020

Using hyperledger discover service


# This assumes the following information is available:
#   1. a user's private key, public key and the MSP ID of the org it belongs to
#   2. a peer's TLS CA cert and its endpoint
 
# Produce the conf.yaml file based on the information provided
discover --configFile conf.yaml --peerTLSCA ca.crt \
--userKey priv_sk --userCert admin.pem --MSP org0examplecom saveConfig

# Discover peers
discover --configFile conf.yaml peers --channel mychannel \
--server peer1.org0.example.com:7051

# Discover config
discover --configFile conf.yaml config --channel mychannel \
--server peer1.org0.example.com:7051

# Discover endorsers for a chaincode
discover --configFile conf.yaml endorsers --channel mychannel \
--chaincode simple --server peer1.org0.example.com:7051 
 
# Choose not to use config file, pass necessary info as command line parameters
discover peers --peerTLSCA ca.crt --userKey priv_sk --userCert admin.pem \
--MSP org0examplecom --channel mychannel --server peer1.org0.example.com:7051
 
discover config --peerTLSCA ca.crt --userKey priv_sk --userCert admin.pem \
--MSP org0examplecom --channel mychannel --server peer1.org0.example.com:7051
 
discover endorsers --peerTLSCA ca.crt --userKey priv_sk --userCert admin.pem \
--MSP org0examplecom --channel mychannel --server peer1.org0.example.com:7051 \
--chaincode simple