# in envoy_router.template.json must be specified here. It is a dictionary of dictionaries.
# Options can be specified for each cluster if needed. See make_route_internal() in
# routing_helper.template.json for the types of options supported.
-front_envoy_clusters = {
-    'service1': {},
-    'service2': {},
-    'service3': {},
-    'ratelimit': {}
-}
+front_envoy_clusters = {'service1': {}, 'service2': {}, 'service3': {}, 'ratelimit': {}}
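The empty dicts above mean each front proxy cluster just uses the default routing behavior. Purely as an illustration of the per-cluster options mentioned in the comment, an entry with options would look roughly like the sketch below; the option key is a placeholder, and the real set of supported keys is whatever make_route_internal() in routing_helper.template.json accepts.

# Illustrative sketch only: 'some_option' is a placeholder key; consult
# make_route_internal() in routing_helper.template.json for the keys it
# actually supports.
front_envoy_clusters_with_options = {
    'service1': {'some_option': 'some_value'},  # hypothetical per-cluster option
    'service2': {},                             # cluster that keeps the defaults
}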

# This is the set of internal services that local Envoys will route to. All services that will be
# accessed via the 9001 egress port need to be listed here. It is a dictionary of dictionaries.
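The service_to_service_envoy_clusters dictionary itself is collapsed out of this hunk, but given the comment above and the way it is passed to generate_config() further down, a hedged sketch of its shape would be:

# Hedged sketch of the shape only; the service names are placeholders and the real
# definition lives in the collapsed portion of the file.
service_to_service_envoy_clusters = {
    'service1': {},
    'service2': {},
}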
# we demonstrate setting up proxying for DynamoDB. In the config, this ends up using the HTTP
# DynamoDB statistics filter, as well as generating a special access log which includes the
# X-AMZN-RequestId response header.
-external_virtual_hosts = [
-    {
-        'name': 'dynamodb_iad',
-        'address': "127.0.0.1",
-        'protocol': "TCP",
-        'port_value': "9204",
-        'hosts': [
-            {
-                'name': 'dynamodb_iad', 'domain': '*',
-                'remote_address': 'dynamodb.us-east-1.amazonaws.com',
-                'protocol': 'TCP',
-                'port_value': '443',
-                'verify_subject_alt_name': ['dynamodb.us-east-1.amazonaws.com'],
-                'ssl': True
-            }
-        ],
-        'is_amzn_service': True,
-        'cluster_type': 'logical_dns'
+external_virtual_hosts = [{
+    'name':
+        'dynamodb_iad',
+    'address':
+        "127.0.0.1",
+    'protocol':
+        "TCP",
+    'port_value':
+        "9204",
+    'hosts': [{
+        'name': 'dynamodb_iad',
+        'domain': '*',
+        'remote_address': 'dynamodb.us-east-1.amazonaws.com',
+        'protocol': 'TCP',
+        'port_value': '443',
+        'verify_subject_alt_name': ['dynamodb.us-east-1.amazonaws.com'],
+        'ssl': True
+    }],
+    'is_amzn_service':
+        True,
+    'cluster_type':
+        'logical_dns'
}]
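Each entry in external_virtual_hosts gets its own local egress port (9204 above for DynamoDB in us-east-1): the local Envoy listens on that port and forwards to the remote endpoint named in 'hosts' over TLS, verifying the certificate against 'verify_subject_alt_name'. As a hedged sketch, proxying a second region would just mean appending another entry on its own port; the 9205 port and the 'dynamodb_pdx' name below are placeholders, while dynamodb.us-west-2.amazonaws.com is the standard DynamoDB endpoint for us-west-2.

# Sketch only: a second external host on its own local egress port (placeholders).
{
    'name': 'dynamodb_pdx',
    'address': "127.0.0.1",
    'protocol': "TCP",
    'port_value': "9205",
    'hosts': [{
        'name': 'dynamodb_pdx',
        'domain': '*',
        'remote_address': 'dynamodb.us-west-2.amazonaws.com',
        'protocol': 'TCP',
        'port_value': '443',
        'verify_subject_alt_name': ['dynamodb.us-west-2.amazonaws.com'],
        'ssl': True
    }],
    'is_amzn_service': True,
    'cluster_type': 'logical_dns'
}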

# This is the set of mongo clusters that local Envoys can talk to. Each database defines a set of
# as it demonstrates how to set up TCP proxy and the network rate limit filter.
mongos_servers = {
    'somedb': {
-        'address': "127.0.0.1",
-        'protocol': "TCP",
-        'port_value': 27019,
+        'address':
+            "127.0.0.1",
+        'protocol':
+            "TCP",
+        'port_value':
+            27019,
        'hosts': [
-            {'port_value': 27817, 'address': 'router1.yourcompany.net', 'protocol': 'TCP'},
-            {'port_value': 27817, 'address': 'router2.yourcompany.net', 'protocol': 'TCP'},
-            {'port_value': 27817, 'address': 'router3.yourcompany.net', 'protocol': 'TCP'},
-            {'port_value': 27817, 'address': 'router4.yourcompany.net', 'protocol': 'TCP'},
+            {
+                'port_value': 27817,
+                'address': 'router1.yourcompany.net',
+                'protocol': 'TCP'
+            },
+            {
+                'port_value': 27817,
+                'address': 'router2.yourcompany.net',
+                'protocol': 'TCP'
+            },
+            {
+                'port_value': 27817,
+                'address': 'router3.yourcompany.net',
+                'protocol': 'TCP'
+            },
+            {
+                'port_value': 27817,
+                'address': 'router4.yourcompany.net',
+                'protocol': 'TCP'
+            },
        ],
-        'ratelimit': True
+        'ratelimit':
+            True
    }
}

+
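With this block in place, applications on the host reach the somedb mongos fleet through the local Envoy listener on 127.0.0.1:27019 instead of connecting to router1-4.yourcompany.net directly, which is what lets the TCP proxy statistics and the network rate limit filter see the traffic. A hedged sketch of the client side, using pymongo purely as an illustration:

# Illustration only: connect through the local Envoy TCP proxy listener rather than
# directly to the mongos routers.
from pymongo import MongoClient

client = MongoClient('mongodb://127.0.0.1:27019')  # local Envoy listener for 'somedb'
db = client['somedb']
print(db.list_collection_names())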
def generate_config(template_path, template, output_file, **context):
-    """ Generate a final config file based on a template and some context. """
-    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path, followlinks=True),
-                             undefined=jinja2.StrictUndefined)
-    raw_output = env.get_template(template).render(**context)
-    with open(output_file, 'w') as fh:
-        fh.write(raw_output)
+    """ Generate a final config file based on a template and some context. """
+    env = jinja2.Environment(
+        loader=jinja2.FileSystemLoader(template_path, followlinks=True),
+        undefined=jinja2.StrictUndefined)
+    raw_output = env.get_template(template).render(**context)
+    with open(output_file, 'w') as fh:
+        fh.write(raw_output)
+
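One behavioral note on the helper above: because the Jinja2 environment is created with undefined=jinja2.StrictUndefined, rendering fails loudly when a template references a variable that was not supplied via **context, rather than silently substituting an empty string. A minimal illustration with a throwaway template string (not one of the real template files):

# Minimal illustration of StrictUndefined: a missing variable raises an error
# instead of rendering as empty text.
import jinja2

env = jinja2.Environment(undefined=jinja2.StrictUndefined)
template = env.from_string('port: {{ port_value }}')
print(template.render(port_value=9204))  # -> port: 9204
try:
    template.render()  # 'port_value' not supplied
except jinja2.exceptions.UndefinedError as exc:
    print('template error:', exc)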

# Generate a demo config for the main front proxy. This sets up both HTTP and HTTPS listeners,
# as well as a listener for the double proxy to connect to via SSL client authentication.
-generate_config(SCRIPT_DIR, 'envoy_front_proxy_v2.template.yaml',
-                '{}/envoy_front_proxy.v2.yaml'.format(OUT_DIR), clusters=front_envoy_clusters)
+generate_config(
+    SCRIPT_DIR,
+    'envoy_front_proxy_v2.template.yaml',
+    '{}/envoy_front_proxy.v2.yaml'.format(OUT_DIR),
+    clusters=front_envoy_clusters)

# Generate a demo config for the double proxy. This sets up both HTTP and HTTPS listeners,
# and backhauls the traffic to the main front proxy.
@@ -112,11 +137,13 @@ def generate_config(template_path, template, output_file, **context):
# optional external service ports: built from external_virtual_hosts above. Each external host
# that Envoy proxies to listens on its own port.
# optional mongo ports: built from mongos_servers above.
-generate_config(SCRIPT_DIR, 'envoy_service_to_service_v2.template.yaml',
-                '{}/envoy_service_to_service.yaml'.format(OUT_DIR),
-                internal_virtual_hosts=service_to_service_envoy_clusters,
-                external_virtual_hosts=external_virtual_hosts,
-                mongos_servers=mongos_servers)
+generate_config(
+    SCRIPT_DIR,
+    'envoy_service_to_service_v2.template.yaml',
+    '{}/envoy_service_to_service.yaml'.format(OUT_DIR),
+    internal_virtual_hosts=service_to_service_envoy_clusters,
+    external_virtual_hosts=external_virtual_hosts,
+    mongos_servers=mongos_servers)

for google_ext in ['v2.yaml']:
    shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.%s' % google_ext), OUT_DIR)
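The generated files end up in OUT_DIR and can be fed straight to an Envoy binary. A hedged sketch of launching one of them, with the directory name standing in for wherever OUT_DIR points:

# Illustration only: start Envoy against one of the generated configs.
# 'generated_configs' is a placeholder for the actual OUT_DIR path.
import subprocess

subprocess.run(['envoy', '-c', 'generated_configs/envoy_front_proxy.v2.yaml'], check=True)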