Skip to content

Commit

Permalink
Merge pull request #4897 from connext/main
Browse files Browse the repository at this point in the history
ci: staging sync
  • Loading branch information
preethamr authored Sep 14, 2023
2 parents cceb3d3 + 09a543f commit 2cd82d5
Show file tree
Hide file tree
Showing 60 changed files with 66,979 additions and 16,817 deletions.
10 changes: 5 additions & 5 deletions .github/workflows/build-test-deploy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -64,11 +64,11 @@ jobs:
- name: Check Yarn version
run: yarn --version

- name: Validate using commitlint
if: github.ref != 'refs/heads/testnet-prod' || github.ref != 'refs/heads/prod'
uses: wagoid/commitlint-github-action@v5
with:
commitDepth: 1
# - name: Validate using commitlint
# if: github.ref != 'refs/heads/testnet-prod' || github.ref != 'refs/heads/prod'
# uses: wagoid/commitlint-github-action@v5
# with:
# commitDepth: 1

- name: Yarn install
run: yarn install
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@

## Connext Architecture

The Connext architecture can be seen as a layered system, as follows.
The Connext architecture can be seen as a layered system, as follows:

| Layer | Protocol/Stakeholders |
| -------------------------------- | -------------------------------------- |
Expand Down
2 changes: 1 addition & 1 deletion ops/mainnet/prod/backend/config.tf
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ locals {
providers = ["https://rpc.ankr.com/polygon"]
}
"1634886255" = {
providers = ["https://rpc.ankr.com/arbitrum "]
providers = ["https://rpc.ankr.com/arbitrum"]
}
"6450786" = {
providers = ["https://rpc.ankr.com/bsc"]
Expand Down
2 changes: 1 addition & 1 deletion ops/mainnet/prod/backend/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ module "cartographer-transfers-lambda-cron" {
stage = var.stage
container_env_vars = merge(local.cartographer_env_vars, { CARTOGRAPHER_SERVICE = "transfers" })
schedule_expression = "rate(1 minute)"
memory_size = 1024
memory_size = 2048
}

module "cartographer-messages-lambda-cron" {
Expand Down
2 changes: 1 addition & 1 deletion ops/mainnet/prod/core/config.tf
Original file line number Diff line number Diff line change
Expand Up @@ -290,7 +290,7 @@ locals {
logLevel = "debug"
chains = {
"6648936" = {
providers = ["https://eth-mainnet.alchemyapi.io/v2/${var.mainnet_alchemy_key_0}", "https://eth-mainnet.blastapi.io/${var.blast_key}", "https://eth.llamarpc.com"]
providers = ["https://eth-mainnet.alchemyapi.io/v2/${var.mainnet_alchemy_key_1}", "https://eth-mainnet.blastapi.io/${var.blast_key}", "https://eth.llamarpc.com"]
},
"1869640809" = {
providers = ["https://optimism-mainnet.blastapi.io/${var.blast_key}", "https://mainnet.optimism.io"]
Expand Down
19 changes: 2 additions & 17 deletions ops/mainnet/prod/core/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -342,7 +342,7 @@ module "lighthouse_prover_subscriber" {
ingress_cdir_blocks = ["0.0.0.0/0"]
ingress_ipv6_cdir_blocks = []
service_security_groups = flatten([module.network.allow_all_sg, module.network.ecs_task_sg])
cert_arn = var.certificate_arn_testnet
cert_arn = var.certificate_arn
container_env_vars = concat(local.lighthouse_prover_subscriber_env_vars, [{ name = "LIGHTHOUSE_SERVICE", value = "prover-sub" }])
}
module "lighthouse_prover_subscriber_auto_scaling" {
Expand All @@ -358,21 +358,6 @@ module "lighthouse_prover_subscriber_auto_scaling" {
avg_mem_utilization_target = 15
}

module "lighthouse_prover_cron" {
source = "../../../modules/lambda"
ecr_repository_name = "nxtp-lighthouse"
docker_image_tag = var.lighthouse_image_tag
container_family = "lighthouse-prover"
environment = var.environment
stage = var.stage
container_env_vars = merge(local.lighthouse_env_vars, {
LIGHTHOUSE_SERVICE = "prover"
})
schedule_expression = "rate(30 minutes)"
memory_size = 4096
timeout = 900
}

module "lighthouse_process_from_root_cron" {
source = "../../../modules/lambda"
ecr_repository_name = "nxtp-lighthouse"
Expand Down Expand Up @@ -407,7 +392,7 @@ module "lighthouse_sendoutboundroot_cron" {
stage = var.stage
container_env_vars = merge(local.lighthouse_env_vars, { LIGHTHOUSE_SERVICE = "sendoutboundroot" })
schedule_expression = "rate(120 minutes)"
memory_size = 512
memory_size = 2048
}

module "lighthouse_propose_cron" {
Expand Down
4 changes: 2 additions & 2 deletions ops/testnet/prod/core/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ module "router_subscriber" {
loadbalancer_port = 80
cpu = 512
memory = 1024
instance_count = 3
instance_count = 6
timeout = 180
ingress_cdir_blocks = ["0.0.0.0/0"]
ingress_ipv6_cdir_blocks = []
Expand Down Expand Up @@ -508,7 +508,7 @@ module "sequencer_cache" {
sg_id = module.network.ecs_task_sg
vpc_id = module.network.vpc_id
cache_subnet_group_subnet_ids = module.network.public_subnets
node_type = "cache.t2.medium"
node_type = "cache.r4.large"
public_redis = true
}

Expand Down
36 changes: 4 additions & 32 deletions packages/adapters/cache/src/lib/caches/auctions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -130,42 +130,14 @@ export class AuctionsCache extends Cache {
return await this.data.hset(`${this.prefix}:status`, transferId, JSON.stringify(currrentStatus));
}

/// MARK - Queued Transfers
/**
 * Retrieve all transfer IDs that have the ExecStatus.Enqueued status.
 *
 * Scans the `<prefix>:status` hash with HSCAN (cursor-based, non-blocking),
 * then filters the collected transfer IDs down to those whose status is
 * ExecStatus.Enqueued.
 *
 * @returns An array of transfer IDs.
 */
public async getQueuedTransfers(): Promise<string[]> {
  const stream = this.data.hscanStream(`${this.prefix}:status`);
  // Use a Set for O(1) de-duplication: SCAN may deliver the same key more
  // than once (see https://redis.io/commands/scan/#scan-guarantees); the
  // original Array.includes check was O(n) per key, O(n^2) overall.
  const keys = new Set<string>();
  await new Promise<void>((res) => {
    stream.on("data", (resultKeys: string[] = []) => {
      for (const resultKey of resultKeys) {
        keys.add(resultKey);
      }
    });
    stream.on("end", () => {
      res();
    });
  });
  // Look up all statuses concurrently instead of awaiting one-by-one;
  // Promise.all preserves input order, so first-seen key order is kept.
  const entries = await Promise.all(
    [...keys].map(async (key) => ({ key, status: await this.getExecStatus(key) })),
  );
  return entries.filter(({ status }) => status === ExecStatus.Enqueued).map(({ key }) => key);
}

/**
 * Removes all the auction data/status for a given transferId.
 *
 * Note: the diff residue in this span contained two conflicting `dataKey`
 * declarations (`:data` and `:auction`); resolved to the newer `:auction`
 * key — a duplicate `const` declaration in one scope does not compile.
 *
 * @param transferId - The transferId to be removed
 */
public async pruneAuctionData(transferId: string): Promise<void> {
  // Remove the stored auction entry for this transfer.
  const dataKey = `${this.prefix}:auction`;
  await this.data.hdel(dataKey, transferId);
  // Remove the execution status entry as well so no stale state remains.
  const statusKey = `${this.prefix}:status`;
  await this.data.hdel(statusKey, transferId);
}
}
45 changes: 0 additions & 45 deletions packages/adapters/cache/test/lib/caches/auctions.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -255,50 +255,5 @@ describe("AuctionCache", () => {
expect(secondCallTimestamp).to.not.be.eq(timestamp);
});
});

describe("#getQueuedTransfers", () => {
  // Helper: generate `count` random transfer IDs.
  const mockTransferIdBatch = (count: number) => Array.from({ length: count }, () => getRandomBytes32());

  it("happy: should retrieve existing queued transfers", async () => {
    const enqueued = mockTransferIdBatch(10);
    for (const id of enqueued) {
      await mockRedisHelpers.setExecStatus(id, ExecStatus.Enqueued);
    }

    expect(await cache.getQueuedTransfers()).to.deep.eq(enqueued);
  });

  it("should not retrieve transfers of other statuses", async () => {
    const enqueued = mockTransferIdBatch(10);
    for (const id of enqueued) {
      await mockRedisHelpers.setExecStatus(id, ExecStatus.Enqueued);
    }

    // Simulate: a lot have been sent already.
    for (const id of mockTransferIdBatch(1234)) {
      await mockRedisHelpers.setExecStatus(id, ExecStatus.Sent);
    }

    expect(await cache.getQueuedTransfers()).to.deep.eq(enqueued);
  });

  it("should return empty array if no transfers have queued status", async () => {
    for (const id of mockTransferIdBatch(27)) {
      await mockRedisHelpers.setExecStatus(id, ExecStatus.Sent);
    }

    expect(await cache.getQueuedTransfers()).to.deep.eq([]);
  });

  it("sad: should return empty array if no queued transfers exist", async () => {
    expect(await cache.getQueuedTransfers()).to.deep.eq([]);
  });
});
});
});
47 changes: 47 additions & 0 deletions packages/adapters/database/db/schema.sql
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,27 @@ SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;

--
-- Name: pg_cron; Type: EXTENSION; Schema: -; Owner: -
--

-- pg_cron enables cron-style scheduled jobs inside PostgreSQL itself
-- (installed into pg_catalog so it is visible from every schema).
CREATE EXTENSION IF NOT EXISTS pg_cron WITH SCHEMA pg_catalog;


--
-- Name: EXTENSION pg_cron; Type: COMMENT; Schema: -; Owner: -
--

COMMENT ON EXTENSION pg_cron IS 'Job scheduler for PostgreSQL';


--
-- Name: public; Type: SCHEMA; Schema: -; Owner: -
--

-- *not* creating schema, since initdb creates it

--
-- Name: action_type; Type: TYPE; Schema: public; Owner: -
--
Expand Down Expand Up @@ -1366,6 +1387,32 @@ ALTER TABLE ONLY public.asset_balances
ADD CONSTRAINT fk_router FOREIGN KEY (router_address) REFERENCES public.routers(address);


--
-- Name: job cron_job_policy; Type: POLICY; Schema: cron; Owner: -
--

-- Row-level security: a user may only see/modify cron.job rows they own.
CREATE POLICY cron_job_policy ON cron.job USING ((username = CURRENT_USER));


--
-- Name: job_run_details cron_job_run_details_policy; Type: POLICY; Schema: cron; Owner: -
--

-- Same ownership restriction for the per-run history table.
CREATE POLICY cron_job_run_details_policy ON cron.job_run_details USING ((username = CURRENT_USER));


--
-- Name: job; Type: ROW SECURITY; Schema: cron; Owner: -
--

-- Policies above only take effect once RLS is enabled on each table.
ALTER TABLE cron.job ENABLE ROW LEVEL SECURITY;

--
-- Name: job_run_details; Type: ROW SECURITY; Schema: cron; Owner: -
--

ALTER TABLE cron.job_run_details ENABLE ROW LEVEL SECURITY;

--
-- PostgreSQL database dump complete
--
Expand Down
Loading

0 comments on commit 2cd82d5

Please sign in to comment.