xds: bump envoy to 6ff0bce8ff417a252cde4d04dfb9cba2bab463d8

Also add an import script for udpa protos from https://github.com/cncf/udpa

This PR syncs up with internal cl/265717410
ZHANG Dapeng 2019-08-28 12:58:09 -07:00 committed by GitHub
parent c240f27077
commit 0bc659cfdd
30 changed files with 411 additions and 2062 deletions
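
The bulk of the diff is mechanical: the udpa protos are now imported from the standalone https://github.com/cncf/udpa repository instead of the copy bundled in the envoy repository, so their generated Java package changes from io.envoyproxy.udpa to com.github.udpa.udpa. A minimal sketch of what the rename means for callers (the cpu_utilization field name is assumed from udpa/data/orca/v1/orca_load_report.proto):

// Was: import io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport;
import com.github.udpa.udpa.data.orca.v1.OrcaLoadReport;

public final class UdpaPackageRename {
  public static void main(String[] args) {
    // Same proto message as before; only the generated Java package differs
    // now that the message is generated from the cncf/udpa copy of the proto.
    OrcaLoadReport report = OrcaLoadReport.newBuilder().setCpuUtilization(0.5).build();
    System.out.println(report);
  }
}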

View File

@@ -59,3 +59,13 @@ which can be obtained at:
* https://github.com/lyft/protoc-gen-validate
* LOCATION_IN_GRPC:
* xds/third_party/protoc-gen-validate
This product contains a modified portion of 'udpa',
an open source universal data plane API, which can be obtained at:
* LICENSE:
* xds/third_party/udpa/LICENSE (Apache License 2.0)
* HOMEPAGE:
* https://github.com/cncf/udpa
* LOCATION_IN_GRPC:
* xds/third_party/udpa

View File

@@ -49,6 +49,7 @@ sourceSets {
srcDir 'third_party/envoy/src/main/proto'
srcDir 'third_party/gogoproto/src/main/proto'
srcDir 'third_party/protoc-gen-validate/src/main/proto'
srcDir 'third_party/udpa/src/main/proto'
}
}
}
@@ -64,6 +65,7 @@ shadowJar {
dependencies {
include(project(':grpc-xds'))
}
relocate 'com.github.udpa', 'io.grpc.xds.shaded.com.github.udpa'
relocate 'io.envoyproxy', 'io.grpc.xds.shaded.io.envoyproxy'
relocate 'validate', 'io.grpc.xds.shaded.com.lyft.pgv.validate'
exclude "**/*.proto"
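
For context, each relocate rule rewrites a package prefix inside the shaded grpc-xds jar, so the newly added udpa classes end up under io.grpc.xds.shaded just like the envoy and validate classes already do. A hedged sketch of what that means for consumers of the shaded artifact (the class name is assembled from the relocate rule above, not read out of the jar):

public final class ShadedNameCheck {
  public static void main(String[] args) throws ClassNotFoundException {
    // With the relocate rule above, com.github.udpa.udpa.data.orca.v1.OrcaLoadReport
    // should be reachable under the shaded prefix inside the shadow jar.
    Class<?> relocated = Class.forName(
        "io.grpc.xds.shaded.com.github.udpa.udpa.data.orca.v1.OrcaLoadReport");
    System.out.println(relocated.getName());
  }
}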

View File

@@ -1,4 +1,4 @@
package io.envoyproxy.udpa.service.orca.v1;
package com.github.udpa.udpa.service.orca.v1;
import static io.grpc.MethodDescriptor.generateFullMethodName;
import static io.grpc.stub.ClientCalls.asyncBidiStreamingCall;
@@ -37,29 +37,29 @@ public final class OpenRcaServiceGrpc {
public static final String SERVICE_NAME = "udpa.service.orca.v1.OpenRcaService";
// Static method descriptors that strictly reflect the proto.
private static volatile io.grpc.MethodDescriptor<io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest,
io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport> getStreamCoreMetricsMethod;
private static volatile io.grpc.MethodDescriptor<com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest,
com.github.udpa.udpa.data.orca.v1.OrcaLoadReport> getStreamCoreMetricsMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "StreamCoreMetrics",
requestType = io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest.class,
responseType = io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport.class,
requestType = com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest.class,
responseType = com.github.udpa.udpa.data.orca.v1.OrcaLoadReport.class,
methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING)
public static io.grpc.MethodDescriptor<io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest,
io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport> getStreamCoreMetricsMethod() {
io.grpc.MethodDescriptor<io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest, io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport> getStreamCoreMetricsMethod;
public static io.grpc.MethodDescriptor<com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest,
com.github.udpa.udpa.data.orca.v1.OrcaLoadReport> getStreamCoreMetricsMethod() {
io.grpc.MethodDescriptor<com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest, com.github.udpa.udpa.data.orca.v1.OrcaLoadReport> getStreamCoreMetricsMethod;
if ((getStreamCoreMetricsMethod = OpenRcaServiceGrpc.getStreamCoreMetricsMethod) == null) {
synchronized (OpenRcaServiceGrpc.class) {
if ((getStreamCoreMetricsMethod = OpenRcaServiceGrpc.getStreamCoreMetricsMethod) == null) {
OpenRcaServiceGrpc.getStreamCoreMetricsMethod = getStreamCoreMetricsMethod =
io.grpc.MethodDescriptor.<io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest, io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport>newBuilder()
io.grpc.MethodDescriptor.<com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest, com.github.udpa.udpa.data.orca.v1.OrcaLoadReport>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING)
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "StreamCoreMetrics"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest.getDefaultInstance()))
com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport.getDefaultInstance()))
com.github.udpa.udpa.data.orca.v1.OrcaLoadReport.getDefaultInstance()))
.setSchemaDescriptor(new OpenRcaServiceMethodDescriptorSupplier("StreamCoreMetrics"))
.build();
}
@@ -107,8 +107,8 @@ public final class OpenRcaServiceGrpc {
/**
*/
public void streamCoreMetrics(io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest request,
io.grpc.stub.StreamObserver<io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport> responseObserver) {
public void streamCoreMetrics(com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest request,
io.grpc.stub.StreamObserver<com.github.udpa.udpa.data.orca.v1.OrcaLoadReport> responseObserver) {
asyncUnimplementedUnaryCall(getStreamCoreMetricsMethod(), responseObserver);
}
@@ -118,8 +118,8 @@ public final class OpenRcaServiceGrpc {
getStreamCoreMetricsMethod(),
asyncServerStreamingCall(
new MethodHandlers<
io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest,
io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport>(
com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest,
com.github.udpa.udpa.data.orca.v1.OrcaLoadReport>(
this, METHODID_STREAM_CORE_METRICS)))
.build();
}
@@ -155,8 +155,8 @@ public final class OpenRcaServiceGrpc {
/**
*/
public void streamCoreMetrics(io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest request,
io.grpc.stub.StreamObserver<io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport> responseObserver) {
public void streamCoreMetrics(com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest request,
io.grpc.stub.StreamObserver<com.github.udpa.udpa.data.orca.v1.OrcaLoadReport> responseObserver) {
asyncServerStreamingCall(
getChannel().newCall(getStreamCoreMetricsMethod(), getCallOptions()), request, responseObserver);
}
@@ -192,8 +192,8 @@ public final class OpenRcaServiceGrpc {
/**
*/
public java.util.Iterator<io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport> streamCoreMetrics(
io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest request) {
public java.util.Iterator<com.github.udpa.udpa.data.orca.v1.OrcaLoadReport> streamCoreMetrics(
com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest request) {
return blockingServerStreamingCall(
getChannel(), getStreamCoreMetricsMethod(), getCallOptions(), request);
}
@@ -248,8 +248,8 @@ public final class OpenRcaServiceGrpc {
public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
switch (methodId) {
case METHODID_STREAM_CORE_METRICS:
serviceImpl.streamCoreMetrics((io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest) request,
(io.grpc.stub.StreamObserver<io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport>) responseObserver);
serviceImpl.streamCoreMetrics((com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest) request,
(io.grpc.stub.StreamObserver<com.github.udpa.udpa.data.orca.v1.OrcaLoadReport>) responseObserver);
break;
default:
throw new AssertionError();
@@ -273,7 +273,7 @@ public final class OpenRcaServiceGrpc {
@java.lang.Override
public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() {
return io.envoyproxy.udpa.service.orca.v1.OrcaProto.getDescriptor();
return com.github.udpa.udpa.service.orca.v1.OrcaProto.getDescriptor();
}
@java.lang.Override
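
Every hunk in OpenRcaServiceGrpc above is the same package-prefix swap; no signatures otherwise change. For orientation, a minimal usage sketch against the new names (the target address is illustrative; StreamCoreMetrics is server-streaming, so the blocking stub returns an Iterator, as the signatures above show):

import com.github.udpa.udpa.data.orca.v1.OrcaLoadReport;
import com.github.udpa.udpa.service.orca.v1.OpenRcaServiceGrpc;
import com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import java.util.Iterator;

public final class OrcaBlockingExample {
  public static void main(String[] args) {
    ManagedChannel channel =
        ManagedChannelBuilder.forTarget("localhost:9000").usePlaintext().build();
    OpenRcaServiceGrpc.OpenRcaServiceBlockingStub stub =
        OpenRcaServiceGrpc.newBlockingStub(channel);
    // Blocking server-streaming call: iterate over the reported load metrics.
    Iterator<OrcaLoadReport> reports =
        stub.streamCoreMetrics(OrcaLoadReportRequest.getDefaultInstance());
    while (reports.hasNext()) {
      System.out.println(reports.next());
    }
    channel.shutdownNow();
  }
}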

View File

@@ -1,410 +0,0 @@
package io.envoyproxy.envoy.api.v2;
import static io.grpc.MethodDescriptor.generateFullMethodName;
import static io.grpc.stub.ClientCalls.asyncBidiStreamingCall;
import static io.grpc.stub.ClientCalls.asyncClientStreamingCall;
import static io.grpc.stub.ClientCalls.asyncServerStreamingCall;
import static io.grpc.stub.ClientCalls.asyncUnaryCall;
import static io.grpc.stub.ClientCalls.blockingServerStreamingCall;
import static io.grpc.stub.ClientCalls.blockingUnaryCall;
import static io.grpc.stub.ClientCalls.futureUnaryCall;
import static io.grpc.stub.ServerCalls.asyncBidiStreamingCall;
import static io.grpc.stub.ServerCalls.asyncClientStreamingCall;
import static io.grpc.stub.ServerCalls.asyncServerStreamingCall;
import static io.grpc.stub.ServerCalls.asyncUnaryCall;
import static io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall;
import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall;
/**
* <pre>
* Return list of all clusters this proxy will load balance to.
* </pre>
*/
@javax.annotation.Generated(
value = "by gRPC proto compiler",
comments = "Source: envoy/api/v2/cds.proto")
public final class ClusterDiscoveryServiceGrpc {
private ClusterDiscoveryServiceGrpc() {}
public static final String SERVICE_NAME = "envoy.api.v2.ClusterDiscoveryService";
// Static method descriptors that strictly reflect the proto.
private static volatile io.grpc.MethodDescriptor<io.envoyproxy.envoy.api.v2.DiscoveryRequest,
io.envoyproxy.envoy.api.v2.DiscoveryResponse> getStreamClustersMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "StreamClusters",
requestType = io.envoyproxy.envoy.api.v2.DiscoveryRequest.class,
responseType = io.envoyproxy.envoy.api.v2.DiscoveryResponse.class,
methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
public static io.grpc.MethodDescriptor<io.envoyproxy.envoy.api.v2.DiscoveryRequest,
io.envoyproxy.envoy.api.v2.DiscoveryResponse> getStreamClustersMethod() {
io.grpc.MethodDescriptor<io.envoyproxy.envoy.api.v2.DiscoveryRequest, io.envoyproxy.envoy.api.v2.DiscoveryResponse> getStreamClustersMethod;
if ((getStreamClustersMethod = ClusterDiscoveryServiceGrpc.getStreamClustersMethod) == null) {
synchronized (ClusterDiscoveryServiceGrpc.class) {
if ((getStreamClustersMethod = ClusterDiscoveryServiceGrpc.getStreamClustersMethod) == null) {
ClusterDiscoveryServiceGrpc.getStreamClustersMethod = getStreamClustersMethod =
io.grpc.MethodDescriptor.<io.envoyproxy.envoy.api.v2.DiscoveryRequest, io.envoyproxy.envoy.api.v2.DiscoveryResponse>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "StreamClusters"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.envoyproxy.envoy.api.v2.DiscoveryRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.envoyproxy.envoy.api.v2.DiscoveryResponse.getDefaultInstance()))
.setSchemaDescriptor(new ClusterDiscoveryServiceMethodDescriptorSupplier("StreamClusters"))
.build();
}
}
}
return getStreamClustersMethod;
}
private static volatile io.grpc.MethodDescriptor<io.envoyproxy.envoy.api.v2.DeltaDiscoveryRequest,
io.envoyproxy.envoy.api.v2.DeltaDiscoveryResponse> getDeltaClustersMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "DeltaClusters",
requestType = io.envoyproxy.envoy.api.v2.DeltaDiscoveryRequest.class,
responseType = io.envoyproxy.envoy.api.v2.DeltaDiscoveryResponse.class,
methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
public static io.grpc.MethodDescriptor<io.envoyproxy.envoy.api.v2.DeltaDiscoveryRequest,
io.envoyproxy.envoy.api.v2.DeltaDiscoveryResponse> getDeltaClustersMethod() {
io.grpc.MethodDescriptor<io.envoyproxy.envoy.api.v2.DeltaDiscoveryRequest, io.envoyproxy.envoy.api.v2.DeltaDiscoveryResponse> getDeltaClustersMethod;
if ((getDeltaClustersMethod = ClusterDiscoveryServiceGrpc.getDeltaClustersMethod) == null) {
synchronized (ClusterDiscoveryServiceGrpc.class) {
if ((getDeltaClustersMethod = ClusterDiscoveryServiceGrpc.getDeltaClustersMethod) == null) {
ClusterDiscoveryServiceGrpc.getDeltaClustersMethod = getDeltaClustersMethod =
io.grpc.MethodDescriptor.<io.envoyproxy.envoy.api.v2.DeltaDiscoveryRequest, io.envoyproxy.envoy.api.v2.DeltaDiscoveryResponse>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "DeltaClusters"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.envoyproxy.envoy.api.v2.DeltaDiscoveryRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.envoyproxy.envoy.api.v2.DeltaDiscoveryResponse.getDefaultInstance()))
.setSchemaDescriptor(new ClusterDiscoveryServiceMethodDescriptorSupplier("DeltaClusters"))
.build();
}
}
}
return getDeltaClustersMethod;
}
private static volatile io.grpc.MethodDescriptor<io.envoyproxy.envoy.api.v2.DiscoveryRequest,
io.envoyproxy.envoy.api.v2.DiscoveryResponse> getFetchClustersMethod;
@io.grpc.stub.annotations.RpcMethod(
fullMethodName = SERVICE_NAME + '/' + "FetchClusters",
requestType = io.envoyproxy.envoy.api.v2.DiscoveryRequest.class,
responseType = io.envoyproxy.envoy.api.v2.DiscoveryResponse.class,
methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
public static io.grpc.MethodDescriptor<io.envoyproxy.envoy.api.v2.DiscoveryRequest,
io.envoyproxy.envoy.api.v2.DiscoveryResponse> getFetchClustersMethod() {
io.grpc.MethodDescriptor<io.envoyproxy.envoy.api.v2.DiscoveryRequest, io.envoyproxy.envoy.api.v2.DiscoveryResponse> getFetchClustersMethod;
if ((getFetchClustersMethod = ClusterDiscoveryServiceGrpc.getFetchClustersMethod) == null) {
synchronized (ClusterDiscoveryServiceGrpc.class) {
if ((getFetchClustersMethod = ClusterDiscoveryServiceGrpc.getFetchClustersMethod) == null) {
ClusterDiscoveryServiceGrpc.getFetchClustersMethod = getFetchClustersMethod =
io.grpc.MethodDescriptor.<io.envoyproxy.envoy.api.v2.DiscoveryRequest, io.envoyproxy.envoy.api.v2.DiscoveryResponse>newBuilder()
.setType(io.grpc.MethodDescriptor.MethodType.UNARY)
.setFullMethodName(generateFullMethodName(SERVICE_NAME, "FetchClusters"))
.setSampledToLocalTracing(true)
.setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.envoyproxy.envoy.api.v2.DiscoveryRequest.getDefaultInstance()))
.setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
io.envoyproxy.envoy.api.v2.DiscoveryResponse.getDefaultInstance()))
.setSchemaDescriptor(new ClusterDiscoveryServiceMethodDescriptorSupplier("FetchClusters"))
.build();
}
}
}
return getFetchClustersMethod;
}
/**
* Creates a new async stub that supports all call types for the service
*/
public static ClusterDiscoveryServiceStub newStub(io.grpc.Channel channel) {
return new ClusterDiscoveryServiceStub(channel);
}
/**
* Creates a new blocking-style stub that supports unary and streaming output calls on the service
*/
public static ClusterDiscoveryServiceBlockingStub newBlockingStub(
io.grpc.Channel channel) {
return new ClusterDiscoveryServiceBlockingStub(channel);
}
/**
* Creates a new ListenableFuture-style stub that supports unary calls on the service
*/
public static ClusterDiscoveryServiceFutureStub newFutureStub(
io.grpc.Channel channel) {
return new ClusterDiscoveryServiceFutureStub(channel);
}
/**
* <pre>
* Return list of all clusters this proxy will load balance to.
* </pre>
*/
public static abstract class ClusterDiscoveryServiceImplBase implements io.grpc.BindableService {
/**
*/
public io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DiscoveryRequest> streamClusters(
io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DiscoveryResponse> responseObserver) {
return asyncUnimplementedStreamingCall(getStreamClustersMethod(), responseObserver);
}
/**
*/
public io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DeltaDiscoveryRequest> deltaClusters(
io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DeltaDiscoveryResponse> responseObserver) {
return asyncUnimplementedStreamingCall(getDeltaClustersMethod(), responseObserver);
}
/**
*/
public void fetchClusters(io.envoyproxy.envoy.api.v2.DiscoveryRequest request,
io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DiscoveryResponse> responseObserver) {
asyncUnimplementedUnaryCall(getFetchClustersMethod(), responseObserver);
}
@java.lang.Override public final io.grpc.ServerServiceDefinition bindService() {
return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
.addMethod(
getStreamClustersMethod(),
asyncBidiStreamingCall(
new MethodHandlers<
io.envoyproxy.envoy.api.v2.DiscoveryRequest,
io.envoyproxy.envoy.api.v2.DiscoveryResponse>(
this, METHODID_STREAM_CLUSTERS)))
.addMethod(
getDeltaClustersMethod(),
asyncBidiStreamingCall(
new MethodHandlers<
io.envoyproxy.envoy.api.v2.DeltaDiscoveryRequest,
io.envoyproxy.envoy.api.v2.DeltaDiscoveryResponse>(
this, METHODID_DELTA_CLUSTERS)))
.addMethod(
getFetchClustersMethod(),
asyncUnaryCall(
new MethodHandlers<
io.envoyproxy.envoy.api.v2.DiscoveryRequest,
io.envoyproxy.envoy.api.v2.DiscoveryResponse>(
this, METHODID_FETCH_CLUSTERS)))
.build();
}
}
/**
* <pre>
* Return list of all clusters this proxy will load balance to.
* </pre>
*/
public static final class ClusterDiscoveryServiceStub extends io.grpc.stub.AbstractStub<ClusterDiscoveryServiceStub> {
private ClusterDiscoveryServiceStub(io.grpc.Channel channel) {
super(channel);
}
private ClusterDiscoveryServiceStub(io.grpc.Channel channel,
io.grpc.CallOptions callOptions) {
super(channel, callOptions);
}
@java.lang.Override
protected ClusterDiscoveryServiceStub build(io.grpc.Channel channel,
io.grpc.CallOptions callOptions) {
return new ClusterDiscoveryServiceStub(channel, callOptions);
}
/**
*/
public io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DiscoveryRequest> streamClusters(
io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DiscoveryResponse> responseObserver) {
return asyncBidiStreamingCall(
getChannel().newCall(getStreamClustersMethod(), getCallOptions()), responseObserver);
}
/**
*/
public io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DeltaDiscoveryRequest> deltaClusters(
io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DeltaDiscoveryResponse> responseObserver) {
return asyncBidiStreamingCall(
getChannel().newCall(getDeltaClustersMethod(), getCallOptions()), responseObserver);
}
/**
*/
public void fetchClusters(io.envoyproxy.envoy.api.v2.DiscoveryRequest request,
io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DiscoveryResponse> responseObserver) {
asyncUnaryCall(
getChannel().newCall(getFetchClustersMethod(), getCallOptions()), request, responseObserver);
}
}
/**
* <pre>
* Return list of all clusters this proxy will load balance to.
* </pre>
*/
public static final class ClusterDiscoveryServiceBlockingStub extends io.grpc.stub.AbstractStub<ClusterDiscoveryServiceBlockingStub> {
private ClusterDiscoveryServiceBlockingStub(io.grpc.Channel channel) {
super(channel);
}
private ClusterDiscoveryServiceBlockingStub(io.grpc.Channel channel,
io.grpc.CallOptions callOptions) {
super(channel, callOptions);
}
@java.lang.Override
protected ClusterDiscoveryServiceBlockingStub build(io.grpc.Channel channel,
io.grpc.CallOptions callOptions) {
return new ClusterDiscoveryServiceBlockingStub(channel, callOptions);
}
/**
*/
public io.envoyproxy.envoy.api.v2.DiscoveryResponse fetchClusters(io.envoyproxy.envoy.api.v2.DiscoveryRequest request) {
return blockingUnaryCall(
getChannel(), getFetchClustersMethod(), getCallOptions(), request);
}
}
/**
* <pre>
* Return list of all clusters this proxy will load balance to.
* </pre>
*/
public static final class ClusterDiscoveryServiceFutureStub extends io.grpc.stub.AbstractStub<ClusterDiscoveryServiceFutureStub> {
private ClusterDiscoveryServiceFutureStub(io.grpc.Channel channel) {
super(channel);
}
private ClusterDiscoveryServiceFutureStub(io.grpc.Channel channel,
io.grpc.CallOptions callOptions) {
super(channel, callOptions);
}
@java.lang.Override
protected ClusterDiscoveryServiceFutureStub build(io.grpc.Channel channel,
io.grpc.CallOptions callOptions) {
return new ClusterDiscoveryServiceFutureStub(channel, callOptions);
}
/**
*/
public com.google.common.util.concurrent.ListenableFuture<io.envoyproxy.envoy.api.v2.DiscoveryResponse> fetchClusters(
io.envoyproxy.envoy.api.v2.DiscoveryRequest request) {
return futureUnaryCall(
getChannel().newCall(getFetchClustersMethod(), getCallOptions()), request);
}
}
private static final int METHODID_FETCH_CLUSTERS = 0;
private static final int METHODID_STREAM_CLUSTERS = 1;
private static final int METHODID_DELTA_CLUSTERS = 2;
private static final class MethodHandlers<Req, Resp> implements
io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
private final ClusterDiscoveryServiceImplBase serviceImpl;
private final int methodId;
MethodHandlers(ClusterDiscoveryServiceImplBase serviceImpl, int methodId) {
this.serviceImpl = serviceImpl;
this.methodId = methodId;
}
@java.lang.Override
@java.lang.SuppressWarnings("unchecked")
public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
switch (methodId) {
case METHODID_FETCH_CLUSTERS:
serviceImpl.fetchClusters((io.envoyproxy.envoy.api.v2.DiscoveryRequest) request,
(io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DiscoveryResponse>) responseObserver);
break;
default:
throw new AssertionError();
}
}
@java.lang.Override
@java.lang.SuppressWarnings("unchecked")
public io.grpc.stub.StreamObserver<Req> invoke(
io.grpc.stub.StreamObserver<Resp> responseObserver) {
switch (methodId) {
case METHODID_STREAM_CLUSTERS:
return (io.grpc.stub.StreamObserver<Req>) serviceImpl.streamClusters(
(io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DiscoveryResponse>) responseObserver);
case METHODID_DELTA_CLUSTERS:
return (io.grpc.stub.StreamObserver<Req>) serviceImpl.deltaClusters(
(io.grpc.stub.StreamObserver<io.envoyproxy.envoy.api.v2.DeltaDiscoveryResponse>) responseObserver);
default:
throw new AssertionError();
}
}
}
private static abstract class ClusterDiscoveryServiceBaseDescriptorSupplier
implements io.grpc.protobuf.ProtoFileDescriptorSupplier, io.grpc.protobuf.ProtoServiceDescriptorSupplier {
ClusterDiscoveryServiceBaseDescriptorSupplier() {}
@java.lang.Override
public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() {
return io.envoyproxy.envoy.api.v2.CdsProto.getDescriptor();
}
@java.lang.Override
public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() {
return getFileDescriptor().findServiceByName("ClusterDiscoveryService");
}
}
private static final class ClusterDiscoveryServiceFileDescriptorSupplier
extends ClusterDiscoveryServiceBaseDescriptorSupplier {
ClusterDiscoveryServiceFileDescriptorSupplier() {}
}
private static final class ClusterDiscoveryServiceMethodDescriptorSupplier
extends ClusterDiscoveryServiceBaseDescriptorSupplier
implements io.grpc.protobuf.ProtoMethodDescriptorSupplier {
private final String methodName;
ClusterDiscoveryServiceMethodDescriptorSupplier(String methodName) {
this.methodName = methodName;
}
@java.lang.Override
public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() {
return getServiceDescriptor().findMethodByName(methodName);
}
}
private static volatile io.grpc.ServiceDescriptor serviceDescriptor;
public static io.grpc.ServiceDescriptor getServiceDescriptor() {
io.grpc.ServiceDescriptor result = serviceDescriptor;
if (result == null) {
synchronized (ClusterDiscoveryServiceGrpc.class) {
result = serviceDescriptor;
if (result == null) {
serviceDescriptor = result = io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME)
.setSchemaDescriptor(new ClusterDiscoveryServiceFileDescriptorSupplier())
.addMethod(getStreamClustersMethod())
.addMethod(getDeltaClustersMethod())
.addMethod(getFetchClustersMethod())
.build();
}
}
}
return result;
}
}
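
The checked-in CDS binding above is removed in this commit. For reference, its async stub drove the bidirectional StreamClusters call roughly as follows; a sketch, with the target address and request contents illustrative:

import io.envoyproxy.envoy.api.v2.ClusterDiscoveryServiceGrpc;
import io.envoyproxy.envoy.api.v2.DiscoveryRequest;
import io.envoyproxy.envoy.api.v2.DiscoveryResponse;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.stub.StreamObserver;

public final class CdsStreamExample {
  public static void main(String[] args) {
    ManagedChannel channel =
        ManagedChannelBuilder.forTarget("localhost:9000").usePlaintext().build();
    ClusterDiscoveryServiceGrpc.ClusterDiscoveryServiceStub stub =
        ClusterDiscoveryServiceGrpc.newStub(channel);
    // Bidi stream: send DiscoveryRequests, receive DiscoveryResponses.
    StreamObserver<DiscoveryRequest> requests =
        stub.streamClusters(new StreamObserver<DiscoveryResponse>() {
          @Override public void onNext(DiscoveryResponse response) {
            System.out.println("version: " + response.getVersionInfo());
          }
          @Override public void onError(Throwable t) { t.printStackTrace(); }
          @Override public void onCompleted() {}
        });
    // Type URL for v2 CDS resources, per the xDS protocol.
    requests.onNext(DiscoveryRequest.newBuilder()
        .setTypeUrl("type.googleapis.com/envoy.api.v2.Cluster")
        .build());
  }
}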

View File

@@ -18,9 +18,9 @@ package io.grpc.xds;
import static com.google.common.base.Preconditions.checkNotNull;
import com.github.udpa.udpa.data.orca.v1.OrcaLoadReport;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport;
import io.grpc.ClientStreamTracer;
import io.grpc.ClientStreamTracer.StreamInfo;
import io.grpc.LoadBalancer.PickResult;

View File

@@ -16,8 +16,8 @@
package io.grpc.xds;
import com.github.udpa.udpa.data.orca.v1.OrcaLoadReport;
import com.google.common.annotations.VisibleForTesting;
import io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport;
import io.grpc.Context;
import io.grpc.Contexts;
import io.grpc.ExperimentalApi;

View File

@@ -22,15 +22,15 @@ import static io.grpc.ConnectivityState.IDLE;
import static io.grpc.ConnectivityState.READY;
import static io.grpc.ConnectivityState.SHUTDOWN;
import com.github.udpa.udpa.data.orca.v1.OrcaLoadReport;
import com.github.udpa.udpa.service.orca.v1.OpenRcaServiceGrpc;
import com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
import com.google.common.base.Stopwatch;
import com.google.common.base.Supplier;
import com.google.protobuf.util.Durations;
import io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport;
import io.envoyproxy.udpa.service.orca.v1.OpenRcaServiceGrpc;
import io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest;
import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.ChannelLogger;

View File

@@ -18,8 +18,8 @@ package io.grpc.xds;
import static com.google.common.base.Preconditions.checkNotNull;
import com.github.udpa.udpa.data.orca.v1.OrcaLoadReport;
import com.google.common.annotations.VisibleForTesting;
import io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport;
import io.grpc.CallOptions;
import io.grpc.ClientStreamTracer;
import io.grpc.ClientStreamTracer.StreamInfo;

View File

@@ -23,7 +23,7 @@ import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport;
import com.github.udpa.udpa.data.orca.v1.OrcaLoadReport;
import io.grpc.ClientStreamTracer;
import io.grpc.ClientStreamTracer.Factory;
import io.grpc.ClientStreamTracer.StreamInfo;

View File

@@ -18,7 +18,7 @@ package io.grpc.xds;
import static com.google.common.truth.Truth.assertThat;
import io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport;
import com.github.udpa.udpa.data.orca.v1.OrcaLoadReport;
import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.ClientCall;

View File

@@ -34,11 +34,11 @@ import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
import static org.mockito.Mockito.when;
import com.github.udpa.udpa.data.orca.v1.OrcaLoadReport;
import com.github.udpa.udpa.service.orca.v1.OpenRcaServiceGrpc;
import com.github.udpa.udpa.service.orca.v1.OrcaLoadReportRequest;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.protobuf.util.Durations;
import io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport;
import io.envoyproxy.udpa.service.orca.v1.OpenRcaServiceGrpc;
import io.envoyproxy.udpa.service.orca.v1.OrcaLoadReportRequest;
import io.grpc.Attributes;
import io.grpc.Channel;
import io.grpc.ChannelLogger;

View File

@@ -27,7 +27,7 @@ import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
import static org.mockito.Mockito.when;
import io.envoyproxy.udpa.data.orca.v1.OrcaLoadReport;
import com.github.udpa.udpa.data.orca.v1.OrcaLoadReport;
import io.grpc.ClientStreamTracer;
import io.grpc.Metadata;
import io.grpc.xds.OrcaPerRequestUtil.OrcaPerRequestReportListener;

View File

@@ -13,31 +13,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Only run this script in a Linux environment.
# Update VERSION, then run ./import.sh from this directory.
set -e
BRANCH=master
# Import VERSION from one of the Google-internal CLs
VERSION=89eb31bcbe2308bf1e9073620e843bf472363495
VERSION=6ff0bce8ff417a252cde4d04dfb9cba2bab463d8
GIT_REPO="https://github.com/envoyproxy/envoy.git"
GIT_BASE_DIR=envoy
SOURCE_PROTO_BASE_DIR=envoy/api
TARGET_PROTO_BASE_DIR=src/main/proto
FILES=(
udpa/data/orca/v1/orca_load_report.proto
udpa/service/orca/v1/orca.proto
envoy/api/v2/auth/cert.proto
envoy/api/v2/cds.proto
envoy/api/v2/cluster/circuit_breaker.proto
envoy/api/v2/cluster/outlier_detection.proto
envoy/api/v2/core/address.proto
envoy/api/v2/core/base.proto
envoy/api/v2/core/config_source.proto
envoy/api/v2/core/grpc_service.proto
envoy/api/v2/core/health_check.proto
envoy/api/v2/core/protocol.proto
envoy/api/v2/core/http_uri.proto
envoy/api/v2/discovery.proto
envoy/api/v2/eds.proto
envoy/api/v2/endpoint/endpoint.proto
@@ -60,6 +50,7 @@ popd
cp -p "${tmpdir}/${GIT_BASE_DIR}/LICENSE" LICENSE
cp -p "${tmpdir}/${GIT_BASE_DIR}/NOTICE" NOTICE
rm -rf "${TARGET_PROTO_BASE_DIR}"
mkdir -p "${TARGET_PROTO_BASE_DIR}"
pushd "${TARGET_PROTO_BASE_DIR}"

View File

@@ -1,380 +0,0 @@
syntax = "proto3";
package envoy.api.v2.auth;
option java_outer_classname = "CertProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.api.v2.auth";
option go_package = "auth";
import "envoy/api/v2/core/base.proto";
import "envoy/api/v2/core/config_source.proto";
import "google/protobuf/wrappers.proto";
import "validate/validate.proto";
import "gogoproto/gogo.proto";
option (gogoproto.equal_all) = true;
// [#protodoc-title: Common TLS configuration]
message TlsParameters {
enum TlsProtocol {
// Envoy will choose the optimal TLS version.
TLS_AUTO = 0;
// TLS 1.0
TLSv1_0 = 1;
// TLS 1.1
TLSv1_1 = 2;
// TLS 1.2
TLSv1_2 = 3;
// TLS 1.3
TLSv1_3 = 4;
}
// Minimum TLS protocol version. By default, it's ``TLSv1_0``.
TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum.defined_only = true];
// Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and
// ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`.
TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum.defined_only = true];
// If specified, the TLS listener will only support the specified `cipher list
// <https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#Cipher-suite-configuration>`_
// when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not
// specified, the default list will be used.
//
// In non-FIPS builds, the default cipher list is:
//
// .. code-block:: none
//
// [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]
// [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]
// ECDHE-ECDSA-AES128-SHA
// ECDHE-RSA-AES128-SHA
// AES128-GCM-SHA256
// AES128-SHA
// ECDHE-ECDSA-AES256-GCM-SHA384
// ECDHE-RSA-AES256-GCM-SHA384
// ECDHE-ECDSA-AES256-SHA
// ECDHE-RSA-AES256-SHA
// AES256-GCM-SHA384
// AES256-SHA
//
// In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default cipher list is:
//
// .. code-block:: none
//
// ECDHE-ECDSA-AES128-GCM-SHA256
// ECDHE-RSA-AES128-GCM-SHA256
// ECDHE-ECDSA-AES128-SHA
// ECDHE-RSA-AES128-SHA
// AES128-GCM-SHA256
// AES128-SHA
// ECDHE-ECDSA-AES256-GCM-SHA384
// ECDHE-RSA-AES256-GCM-SHA384
// ECDHE-ECDSA-AES256-SHA
// ECDHE-RSA-AES256-SHA
// AES256-GCM-SHA384
// AES256-SHA
repeated string cipher_suites = 3;
// If specified, the TLS connection will only support the specified ECDH
// curves. If not specified, the default curves will be used.
//
// In non-FIPS builds, the default curves are:
//
// .. code-block:: none
//
// X25519
// P-256
//
// In builds using :ref:`BoringSSL FIPS <arch_overview_ssl_fips>`, the default curve is:
//
// .. code-block:: none
//
// P-256
repeated string ecdh_curves = 4;
}
message TlsCertificate {
// The TLS certificate chain.
core.DataSource certificate_chain = 1;
// The TLS private key.
core.DataSource private_key = 2;
// The password to decrypt the TLS private key. If this field is not set, it is assumed that the
// TLS private key is not password encrypted.
core.DataSource password = 3;
// [#not-implemented-hide:]
core.DataSource ocsp_staple = 4;
// [#not-implemented-hide:]
repeated core.DataSource signed_certificate_timestamp = 5;
}
message TlsSessionTicketKeys {
// Keys for encrypting and decrypting TLS session tickets. The
// first key in the array contains the key to encrypt all new sessions created by this context.
// All keys are candidates for decrypting received tickets. This allows for easy rotation of keys
// by, for example, putting the new key first, and the previous key second.
//
// If :ref:`session_ticket_keys <envoy_api_field_auth.DownstreamTlsContext.session_ticket_keys>`
// is not specified, the TLS library will still support resuming sessions via tickets, but it will
// use an internally-generated and managed key, so sessions cannot be resumed across hot restarts
// or on different hosts.
//
// Each key must contain exactly 80 bytes of cryptographically-secure random data. For
// example, the output of ``openssl rand 80``.
//
// .. attention::
//
// Using this feature has serious security considerations and risks. Improper handling of keys
// may result in loss of secrecy in connections, even if ciphers supporting perfect forward
// secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some
// discussion. To minimize the risk, you must:
//
// * Keep the session ticket keys at least as secure as your TLS certificate private keys
// * Rotate session ticket keys at least daily, and preferably hourly
// * Always generate keys using a cryptographically-secure random data source
repeated core.DataSource keys = 1 [(validate.rules).repeated .min_items = 1];
}
message CertificateValidationContext {
// TLS certificate data containing certificate authority certificates to use in verifying
// a presented peer certificate (e.g. server certificate for clusters or client certificate
// for listeners). If not specified and a peer certificate is presented it will not be
// verified. By default, a client certificate is optional, unless one of the additional
// options (:ref:`require_client_certificate
// <envoy_api_field_auth.DownstreamTlsContext.require_client_certificate>`,
// :ref:`verify_certificate_spki
// <envoy_api_field_auth.CertificateValidationContext.verify_certificate_spki>`,
// :ref:`verify_certificate_hash
// <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>`, or
// :ref:`verify_subject_alt_name
// <envoy_api_field_auth.CertificateValidationContext.verify_subject_alt_name>`) is also
// specified.
//
// It can optionally contain certificate revocation lists, in which case Envoy will verify
// that the presented peer certificate has not been revoked by one of the included CRLs.
//
// See :ref:`the TLS overview <arch_overview_ssl_enabling_verification>` for a list of common
// system CA locations.
core.DataSource trusted_ca = 1;
// An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the
// SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate
// matches one of the specified values.
//
// A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate
// can be generated with the following command:
//
// .. code-block:: bash
//
// $ openssl x509 -in path/to/client.crt -noout -pubkey \
// | openssl pkey -pubin -outform DER \
// | openssl dgst -sha256 -binary \
// | openssl enc -base64
// NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A=
//
// This is the format used in HTTP Public Key Pinning.
//
// When both:
// :ref:`verify_certificate_hash
// <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>` and
// :ref:`verify_certificate_spki
// <envoy_api_field_auth.CertificateValidationContext.verify_certificate_spki>` are specified,
// a hash matching value from either of the lists will result in the certificate being accepted.
//
// .. attention::
//
// This option is preferred over :ref:`verify_certificate_hash
// <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>`,
// because SPKI is tied to a private key, so it doesn't change when the certificate
// is renewed using the same private key.
repeated string verify_certificate_spki = 3
[(validate.rules).repeated .items.string = {min_bytes: 44, max_bytes: 44}];
// An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that
// the SHA-256 of the DER-encoded presented certificate matches one of the specified values.
//
// A hex-encoded SHA-256 of the certificate can be generated with the following command:
//
// .. code-block:: bash
//
// $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2
// df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a
//
// A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate
// can be generated with the following command:
//
// .. code-block:: bash
//
// $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2
// DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A
//
// Both of those formats are acceptable.
//
// When both:
// :ref:`verify_certificate_hash
// <envoy_api_field_auth.CertificateValidationContext.verify_certificate_hash>` and
// :ref:`verify_certificate_spki
// <envoy_api_field_auth.CertificateValidationContext.verify_certificate_spki>` are specified,
// a hash matching value from either of the lists will result in the certificate being accepted.
repeated string verify_certificate_hash = 2
[(validate.rules).repeated .items.string = {min_bytes: 64, max_bytes: 95}];
// An optional list of Subject Alternative Names. If specified, Envoy will verify that the
// Subject Alternative Name of the presented certificate matches one of the specified values.
//
// .. attention::
//
// Subject Alternative Names are easily spoofable and verifying only them is insecure,
// therefore this option must be used together with :ref:`trusted_ca
// <envoy_api_field_auth.CertificateValidationContext.trusted_ca>`.
repeated string verify_subject_alt_name = 4;
// [#not-implemented-hide:] Must present a signed time-stamped OCSP response.
google.protobuf.BoolValue require_ocsp_staple = 5;
// [#not-implemented-hide:] Must present signed certificate time-stamp.
google.protobuf.BoolValue require_signed_certificate_timestamp = 6;
// An optional `certificate revocation list
// <https://en.wikipedia.org/wiki/Certificate_revocation_list>`_
// (in PEM format). If specified, Envoy will verify that the presented peer
// certificate has not been revoked by this CRL. If this DataSource contains
// multiple CRLs, all of them will be used.
core.DataSource crl = 7;
// If specified, Envoy will not reject expired certificates.
bool allow_expired_certificate = 8;
}
// TLS context shared by both client and server TLS contexts.
message CommonTlsContext {
// TLS protocol versions, cipher suites etc.
TlsParameters tls_params = 1;
// :ref:`Multiple TLS certificates <arch_overview_ssl_cert_select>` can be associated with the
// same context to allow both RSA and ECDSA certificates.
//
// Only a single TLS certificate is supported in client contexts. In server contexts, the first
// RSA certificate is used for clients that only support RSA and the first ECDSA certificate is
// used for clients that support ECDSA.
repeated TlsCertificate tls_certificates = 2;
// Configs for fetching TLS certificates via SDS API.
repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6
[(validate.rules).repeated .max_items = 1];
message CombinedCertificateValidationContext {
// How to validate peer certificates.
CertificateValidationContext default_validation_context = 1
[(validate.rules).message.required = true];
// Config for fetching validation context via SDS API.
SdsSecretConfig validation_context_sds_secret_config = 2
[(validate.rules).message.required = true];
};
oneof validation_context_type {
// How to validate peer certificates.
CertificateValidationContext validation_context = 3;
// Config for fetching validation context via SDS API.
SdsSecretConfig validation_context_sds_secret_config = 7;
// Combined certificate validation context holds a default CertificateValidationContext
// and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic
// and default CertificateValidationContext are merged into a new CertificateValidationContext
// for validation. This merge is done by Message::MergeFrom(), so dynamic
// CertificateValidationContext overwrites singular fields in default
// CertificateValidationContext, and concatenates repeated fields to default
// CertificateValidationContext, and logical OR is applied to boolean fields.
CombinedCertificateValidationContext combined_validation_context = 8;
}
// Supplies the list of ALPN protocols that the listener should expose. In
// practice this is likely to be set to one of two values (see the
// :ref:`codec_type
// <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.codec_type>`
// parameter in the HTTP connection manager for more information):
//
// * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1.
// * "http/1.1" If the listener is only going to support HTTP/1.1.
//
// There is no default for this parameter. If empty, Envoy will not expose ALPN.
repeated string alpn_protocols = 4;
reserved 5;
}
message UpstreamTlsContext {
// Common TLS context settings.
CommonTlsContext common_tls_context = 1;
// SNI string to use when creating TLS backend connections.
string sni = 2 [(validate.rules).string.max_bytes = 255];
// If true, server-initiated TLS renegotiation will be allowed.
//
// .. attention::
//
// TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary.
bool allow_renegotiation = 3;
// Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets
// for TLSv1.2 and older) to store for the purpose of session resumption.
//
// Defaults to 1, setting this to 0 disables session resumption.
google.protobuf.UInt32Value max_session_keys = 4;
}
message DownstreamTlsContext {
// Common TLS context settings.
CommonTlsContext common_tls_context = 1;
// If specified, Envoy will reject connections without a valid client
// certificate.
google.protobuf.BoolValue require_client_certificate = 2;
// If specified, Envoy will reject connections without a valid and matching SNI.
// [#not-implemented-hide:]
google.protobuf.BoolValue require_sni = 3;
oneof session_ticket_keys_type {
// TLS session ticket key settings.
TlsSessionTicketKeys session_ticket_keys = 4;
// [#not-implemented-hide:]
SdsSecretConfig session_ticket_keys_sds_secret_config = 5;
}
}
// [#proto-status: experimental]
message SdsSecretConfig {
// Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.
// When both name and config are specified, the secret can be fetched and/or reloaded via SDS.
// When only name is specified, the secret will be loaded from static resources [V2-API-DIFF].
string name = 1;
core.ConfigSource sds_config = 2;
}
// [#proto-status: experimental]
message Secret {
// Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.
string name = 1;
oneof type {
TlsCertificate tls_certificate = 2;
TlsSessionTicketKeys session_ticket_keys = 3;
CertificateValidationContext validation_context = 4;
}
}
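
For orientation, the TLS messages above map to standard protobuf-java builders under the java_package declared at the top of the file. A minimal sketch, assuming default protobuf-java codegen for these messages:

import io.envoyproxy.envoy.api.v2.auth.CommonTlsContext;
import io.envoyproxy.envoy.api.v2.auth.TlsParameters;
import io.envoyproxy.envoy.api.v2.auth.UpstreamTlsContext;

public final class TlsContextExample {
  public static void main(String[] args) {
    UpstreamTlsContext context = UpstreamTlsContext.newBuilder()
        .setSni("example.com")  // at most 255 bytes, per the validate rule on `sni`
        .setCommonTlsContext(CommonTlsContext.newBuilder()
            .setTlsParams(TlsParameters.newBuilder()
                .setTlsMinimumProtocolVersion(TlsParameters.TlsProtocol.TLSv1_2)))
        .build();
    System.out.println(context);
  }
}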

View File

@@ -1,641 +0,0 @@
syntax = "proto3";
package envoy.api.v2;
option java_outer_classname = "CdsProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.api.v2";
option java_generic_services = true;
import "envoy/api/v2/core/address.proto";
import "envoy/api/v2/auth/cert.proto";
import "envoy/api/v2/core/base.proto";
import "envoy/api/v2/core/config_source.proto";
import "envoy/api/v2/discovery.proto";
import "envoy/api/v2/core/health_check.proto";
import "envoy/api/v2/core/protocol.proto";
import "envoy/api/v2/cluster/circuit_breaker.proto";
import "envoy/api/v2/cluster/outlier_detection.proto";
import "envoy/api/v2/eds.proto";
import "envoy/type/percent.proto";
import "google/api/annotations.proto";
import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";
import "validate/validate.proto";
import "gogoproto/gogo.proto";
option (gogoproto.equal_all) = true;
option (gogoproto.stable_marshaler_all) = true;
// Return list of all clusters this proxy will load balance to.
service ClusterDiscoveryService {
rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
rpc DeltaClusters(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {
}
rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:clusters"
body: "*"
};
}
}
// [#protodoc-title: Clusters]
// Configuration for a single upstream cluster.
// [#comment:next free field: 39]
message Cluster {
// Supplies the name of the cluster which must be unique across all clusters.
// The cluster name is used when emitting
// :ref:`statistics <config_cluster_manager_cluster_stats>` if :ref:`alt_stat_name
// <envoy_api_field_Cluster.alt_stat_name>` is not provided.
// Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.
string name = 1 [(validate.rules).string.min_bytes = 1];
// An optional alternative to the cluster name to be used while emitting stats.
// Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be
// confused with :ref:`Router Filter Header
// <config_http_filters_router_x-envoy-upstream-alt-stat-name>`.
string alt_stat_name = 28;
// Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`
// for an explanation on each type.
enum DiscoveryType {
// Refer to the :ref:`static discovery type<arch_overview_service_discovery_types_static>`
// for an explanation.
STATIC = 0;
// Refer to the :ref:`strict DNS discovery
// type<arch_overview_service_discovery_types_strict_dns>`
// for an explanation.
STRICT_DNS = 1;
// Refer to the :ref:`logical DNS discovery
// type<arch_overview_service_discovery_types_logical_dns>`
// for an explanation.
LOGICAL_DNS = 2;
// Refer to the :ref:`service discovery type<arch_overview_service_discovery_types_eds>`
// for an explanation.
EDS = 3;
// Refer to the :ref:`original destination discovery
// type<arch_overview_service_discovery_types_original_destination>`
// for an explanation.
ORIGINAL_DST = 4;
}
// Extended cluster type.
message CustomClusterType {
// The type of the cluster to instantiate. The name must match a supported cluster type.
string name = 1 [(validate.rules).string.min_bytes = 1];
// Cluster specific configuration which depends on the cluster being instantiated.
// See the supported cluster for further documentation.
google.protobuf.Any typed_config = 2;
}
oneof cluster_discovery_type {
// The :ref:`service discovery type <arch_overview_service_discovery_types>`
// to use for resolving the cluster.
DiscoveryType type = 2 [(validate.rules).enum.defined_only = true];
// The custom cluster type.
CustomClusterType cluster_type = 38;
}
// Only valid when discovery type is EDS.
message EdsClusterConfig {
// Configuration for the source of EDS updates for this Cluster.
core.ConfigSource eds_config = 1;
// Optional alternative to cluster name to present to EDS. This does not
// have the same restrictions as cluster name, i.e. it may be arbitrary
// length.
string service_name = 2;
}
// Configuration to use for EDS updates for the Cluster.
EdsClusterConfig eds_cluster_config = 3;
// The timeout for new network connections to hosts in the cluster.
google.protobuf.Duration connect_timeout = 4 [
(validate.rules).duration.gt = {},
(gogoproto.stdduration) = true,
(gogoproto.nullable) = false
];
// Soft limit on size of the cluster's connections read and write buffers. If
// unspecified, an implementation defined default is applied (1MiB).
google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5;
// Refer to :ref:`load balancer type <arch_overview_load_balancing_types>` architecture
// overview section for information on each type.
enum LbPolicy {
// Refer to the :ref:`round robin load balancing
// policy<arch_overview_load_balancing_types_round_robin>`
// for an explanation.
ROUND_ROBIN = 0;
// Refer to the :ref:`least request load balancing
// policy<arch_overview_load_balancing_types_least_request>`
// for an explanation.
LEAST_REQUEST = 1;
// Refer to the :ref:`ring hash load balancing
// policy<arch_overview_load_balancing_types_ring_hash>`
// for an explanation.
RING_HASH = 2;
// Refer to the :ref:`random load balancing
// policy<arch_overview_load_balancing_types_random>`
// for an explanation.
RANDOM = 3;
// Refer to the :ref:`original destination load balancing
// policy<arch_overview_load_balancing_types_original_destination>`
// for an explanation.
ORIGINAL_DST_LB = 4;
// Refer to the :ref:`Maglev load balancing policy<arch_overview_load_balancing_types_maglev>`
// for an explanation.
MAGLEV = 5;
// This load balancer type must be specified if the configured cluster provides a cluster
// specific load balancer. Consult the configured cluster's documentation for whether to set
// this option or not.
CLUSTER_PROVIDED = 6;
}
// The :ref:`load balancer type <arch_overview_load_balancing_types>` to use
// when picking a host in the cluster.
LbPolicy lb_policy = 6 [(validate.rules).enum.defined_only = true];
// If the service discovery type is
// :ref:`STATIC<envoy_api_enum_value_Cluster.DiscoveryType.STATIC>`,
// :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`
// or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,
// then hosts is required.
//
// .. attention::
//
// **This field is deprecated**. Set the
// :ref:`load_assignment<envoy_api_field_Cluster.load_assignment>` field instead.
//
repeated core.Address hosts = 7 [deprecated = true];
// Setting this is required for specifying members of
// :ref:`STATIC<envoy_api_enum_value_Cluster.DiscoveryType.STATIC>`,
// :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`
// or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>` clusters.
// This field supersedes :ref:`hosts<envoy_api_field_Cluster.hosts>` field.
// [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log<deprecated>`
// once load_assignment is implemented.]
//
// .. attention::
//
// Setting this allows non-EDS cluster types to contain embedded EDS equivalent
// :ref:`endpoint assignments<envoy_api_msg_ClusterLoadAssignment>`.
// Setting this overrides :ref:`hosts<envoy_api_field_Cluster.hosts>` values.
//
ClusterLoadAssignment load_assignment = 33;
// Optional :ref:`active health checking <arch_overview_health_checking>`
// configuration for the cluster. If no
// configuration is specified no health checking will be done and all cluster
// members will be considered healthy at all times.
repeated core.HealthCheck health_checks = 8;
// Optional maximum requests for a single upstream connection. This parameter
// is respected by both the HTTP/1.1 and HTTP/2 connection pool
// implementations. If not specified, there is no limit. Setting this
// parameter to 1 will effectively disable keep alive.
google.protobuf.UInt32Value max_requests_per_connection = 9;
// Optional :ref:`circuit breaking <arch_overview_circuit_break>` for the cluster.
cluster.CircuitBreakers circuit_breakers = 10;
// The TLS configuration for connections to the upstream cluster. If no TLS
// configuration is specified, TLS will not be used for new connections.
//
// .. attention::
//
// Server certificate verification is not enabled by default. Configure
// :ref:`trusted_ca<envoy_api_field_auth.CertificateValidationContext.trusted_ca>` to enable
// verification.
auth.UpstreamTlsContext tls_context = 11;
reserved 12;
// Additional options when handling HTTP requests. These options will be applicable to both
// HTTP1 and HTTP2 requests.
core.HttpProtocolOptions common_http_protocol_options = 29;
// Additional options when handling HTTP1 requests.
core.Http1ProtocolOptions http_protocol_options = 13;
// Even if default HTTP2 protocol options are desired, this field must be
// set so that Envoy will assume that the upstream supports HTTP/2 when
// making new HTTP connection pool connections. Currently, Envoy only
// supports prior knowledge for upstream connections. Even if TLS is used
// with ALPN, `http2_protocol_options` must be specified. As an aside, this allows HTTP/2
// connections to happen over plain text.
core.Http2ProtocolOptions http2_protocol_options = 14;
// The extension_protocol_options field is used to provide extension-specific protocol options
// for upstream connections. The key should match the extension filter name, such as
// "envoy.filters.network.thrift_proxy". See the extension's documentation for details on
// specific options.
map<string, google.protobuf.Struct> extension_protocol_options = 35;
// The extension_protocol_options field is used to provide extension-specific protocol options
// for upstream connections. The key should match the extension filter name, such as
// "envoy.filters.network.thrift_proxy". See the extension's documentation for details on
// specific options.
map<string, google.protobuf.Any> typed_extension_protocol_options = 36;
reserved 15;
// If the DNS refresh rate is specified and the cluster type is either
// :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`,
// or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,
// this value is used as the cluster's DNS refresh
// rate. If this setting is not specified, the value defaults to 5000ms. For
// cluster types other than
// :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`
// and :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`
// this setting is ignored.
google.protobuf.Duration dns_refresh_rate = 16
[(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true];
// When V4_ONLY is selected, the DNS resolver will only perform a lookup for
// addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will
// only perform a lookup for addresses in the IPv6 family. If AUTO is
// specified, the DNS resolver will first perform a lookup for addresses in
// the IPv6 family and fallback to a lookup for addresses in the IPv4 family.
// For cluster types other than
// :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>` and
// :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,
// this setting is
// ignored.
enum DnsLookupFamily {
AUTO = 0;
V4_ONLY = 1;
V6_ONLY = 2;
}
// The DNS IP address resolution policy. If this setting is not specified, the
// value defaults to
// :ref:`AUTO<envoy_api_enum_value_Cluster.DnsLookupFamily.AUTO>`.
DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum.defined_only = true];
// If DNS resolvers are specified and the cluster type is either
// :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`,
// or :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`,
// this value is used to specify the cluster's DNS resolvers.
// If this setting is not specified, the value defaults to the default
// resolver, which uses /etc/resolv.conf for configuration. For cluster types
// other than
// :ref:`STRICT_DNS<envoy_api_enum_value_Cluster.DiscoveryType.STRICT_DNS>`
// and :ref:`LOGICAL_DNS<envoy_api_enum_value_Cluster.DiscoveryType.LOGICAL_DNS>`
// this setting is ignored.
repeated core.Address dns_resolvers = 18;
// If specified, outlier detection will be enabled for this upstream cluster.
// Each of the configuration values can be overridden via
// :ref:`runtime values <config_cluster_manager_cluster_runtime_outlier_detection>`.
cluster.OutlierDetection outlier_detection = 19;
// The interval for removing stale hosts from a cluster type
// :ref:`ORIGINAL_DST<envoy_api_enum_value_Cluster.DiscoveryType.ORIGINAL_DST>`.
// Hosts are considered stale if they have not been used
// as upstream destinations during this interval. New hosts are added
// to original destination clusters on demand as new connections are
// redirected to Envoy, causing the number of hosts in the cluster to
// grow over time. Hosts that are not stale (they are actively used as
// destinations) are kept in the cluster, which allows connections to
// them to remain open, saving the latency that would otherwise be spent
// on opening new connections. If this setting is not specified, the
// value defaults to 5000ms. For cluster types other than
// :ref:`ORIGINAL_DST<envoy_api_enum_value_Cluster.DiscoveryType.ORIGINAL_DST>`
// this setting is ignored.
google.protobuf.Duration cleanup_interval = 20
[(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true];
// Optional configuration used to bind newly established upstream connections.
// This overrides any bind_config specified in the bootstrap proto.
// If the address and port are empty, no bind will be performed.
core.BindConfig upstream_bind_config = 21;
// Optionally divide the endpoints in this cluster into subsets defined by
// endpoint metadata and selected by route and weighted cluster metadata.
message LbSubsetConfig {
// If NO_FALLBACK is selected, a result
// equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,
// any cluster endpoint may be returned (subject to policy, health checks,
// etc). If DEFAULT_SUBSET is selected, load balancing is performed over the
// endpoints matching the values from the default_subset field.
enum LbSubsetFallbackPolicy {
NO_FALLBACK = 0;
ANY_ENDPOINT = 1;
DEFAULT_SUBSET = 2;
}
// The behavior used when no endpoint subset matches the selected route's
// metadata. The value defaults to
// :ref:`NO_FALLBACK<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.
LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum.defined_only = true];
// Specifies the default subset of endpoints used during fallback if
// fallback_policy is
// :ref:`DEFAULT_SUBSET<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.DEFAULT_SUBSET>`.
// Each field in default_subset is
// compared to the matching LbEndpoint.Metadata under the *envoy.lb*
// namespace. It is valid for no hosts to match, in which case the behavior
// is the same as a fallback_policy of
// :ref:`NO_FALLBACK<envoy_api_enum_value_Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.
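// A minimal sketch (the *version* key and its value are assumptions):
//
// .. code-block:: yaml
//
//   lb_subset_config:
//     fallback_policy: DEFAULT_SUBSET
//     default_subset:
//       version: "v1"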
google.protobuf.Struct default_subset = 2;
// Specifications for subsets.
message LbSubsetSelector {
// List of keys to match with the weighted cluster metadata.
repeated string keys = 1;
// The behavior used when no endpoint subset matches the selected route's
// metadata.
LbSubsetSelectorFallbackPolicy fallback_policy = 2
[(validate.rules).enum.defined_only = true];
// Allows overriding the top-level fallback policy per selector.
enum LbSubsetSelectorFallbackPolicy {
// If NOT_DEFINED, the top-level fallback policy is used instead.
NOT_DEFINED = 0;
// If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.
NO_FALLBACK = 1;
// If ANY_ENDPOINT is selected, any cluster endpoint may be returned
// (subject to policy, health checks, etc).
ANY_ENDPOINT = 2;
// If DEFAULT_SUBSET is selected, load balancing is performed over the
// endpoints matching the values from the default_subset field.
DEFAULT_SUBSET = 3;
}
}
// For each entry, LbEndpoint.Metadata's
// *envoy.lb* namespace is traversed and a subset is created for each unique
// combination of key and value. For example:
//
// .. code-block:: json
//
// { "subset_selectors": [
// { "keys": [ "version" ] },
// { "keys": [ "stage", "hardware_type" ] }
// ]}
//
// A subset is matched when the metadata from the selected route and
// weighted cluster contains the same keys and values as the subset's
// metadata. The same host may appear in multiple subsets.
repeated LbSubsetSelector subset_selectors = 3;
// If true, routing to subsets will take into account the localities and locality weights of the
// endpoints when making the routing decision.
//
// There are some potential pitfalls associated with enabling this feature, as the resulting
// traffic split after applying both a subset match and locality weights might be undesirable.
//
// Consider for example a situation in which you have 50/50 split across two localities X/Y
// which have 100 hosts each without subsetting. If the subset LB results in X having only 1
// host selected but Y having 100, then a lot more load is being dumped on the single host in X
// than originally anticipated in the load balancing assignment delivered via EDS.
bool locality_weight_aware = 4;
// When used with locality_weight_aware, scales the weight of each locality by the ratio
// of hosts in the subset vs hosts in the original subset. This aims to even out the load
// going to an individual locality if said locality is disproportionally affected by the
// subset predicate.
bool scale_locality_weight = 5;
// If true, when a fallback policy is configured and its corresponding subset fails to find
// a host this will cause any host to be selected instead.
//
// This is useful when using the default subset as the fallback policy, given the default
// subset might become empty. With this option enabled, if that happens the LB will attempt
// to select a host from the entire cluster.
bool panic_mode_any = 6;
// If true, metadata specified for a metadata key will be matched against the corresponding
// endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value
// and any of the elements in the list matches the criteria.
bool list_as_any = 7;
}
// Configuration for load balancing subsetting.
LbSubsetConfig lb_subset_config = 22;
// Specific configuration for the LeastRequest load balancing policy.
message LeastRequestLbConfig {
// The number of random healthy hosts from which the host with the fewest active requests will
// be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.
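// For instance, widening the sample to three hosts (an illustrative value,
// not a recommendation):
//
// .. code-block:: yaml
//
//   least_request_lb_config:
//     choice_count: 3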
google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32.gte = 2];
}
// Specific configuration for the :ref:`RingHash<arch_overview_load_balancing_types_ring_hash>`
// load balancing policy.
message RingHashLbConfig {
// Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each
// provided host) the better the request distribution will reflect the desired weights. Defaults
// to 1024 entries, and limited to 8M entries. See also
// :ref:`maximum_ring_size<envoy_api_field_Cluster.RingHashLbConfig.maximum_ring_size>`.
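// A hypothetical configuration trading memory for a smoother distribution:
//
// .. code-block:: yaml
//
//   ring_hash_lb_config:
//     minimum_ring_size: 65536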
google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64.lte = 8388608];
reserved 2;
// The hash function used to hash hosts onto the ketama ring.
enum HashFunction {
// Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.
XX_HASH = 0;
// Use `MurmurHash2 <https://sites.google.com/site/murmurhash/>`_, this is compatible with
// std::hash<string> in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled
// on Linux and not macOS.
MURMUR_HASH_2 = 1;
}
// The hash function used to hash hosts onto the ketama ring. The value defaults to
// :ref:`XX_HASH<envoy_api_enum_value_Cluster.RingHashLbConfig.HashFunction.XX_HASH>`.
HashFunction hash_function = 3 [(validate.rules).enum.defined_only = true];
// Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered
// to further constrain resource use. See also
// :ref:`minimum_ring_size<envoy_api_field_Cluster.RingHashLbConfig.minimum_ring_size>`.
google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64.lte = 8388608];
}
// Specific configuration for the
// :ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`
// load balancing policy.
message OriginalDstLbConfig {
// When true, :ref:`x-envoy-original-dst-host
// <config_http_conn_man_headers_x-envoy-original-dst-host>` can be used to override destination
// address.
//
// .. attention::
//
// This header isn't sanitized by default, so enabling this feature allows HTTP clients to
// route traffic to arbitrary hosts and/or ports, which may have serious security
// consequences.
bool use_http_header = 1;
}
// Optional configuration for the load balancing algorithm selected by
// LbPolicy. Currently only
// :ref:`RING_HASH<envoy_api_enum_value_Cluster.LbPolicy.RING_HASH>` and
// :ref:`LEAST_REQUEST<envoy_api_enum_value_Cluster.LbPolicy.LEAST_REQUEST>`
// have additional configuration options.
// Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding
// LbPolicy will generate an error at runtime.
oneof lb_config {
// Optional configuration for the Ring Hash load balancing policy.
RingHashLbConfig ring_hash_lb_config = 23;
// Optional configuration for the Original Destination load balancing policy.
OriginalDstLbConfig original_dst_lb_config = 34;
// Optional configuration for the LeastRequest load balancing policy.
LeastRequestLbConfig least_request_lb_config = 37;
}
// Common configuration for all load balancer implementations.
message CommonLbConfig {
// Configures the :ref:`healthy panic threshold <arch_overview_load_balancing_panic_threshold>`.
// If not specified, the default is 50%.
//
// .. note::
// The specified percent will be truncated to the nearest 1%.
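// For instance, raising the threshold to 70% (an illustrative value):
//
// .. code-block:: yaml
//
//   common_lb_config:
//     healthy_panic_threshold: { value: 70.0 }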
envoy.type.Percent healthy_panic_threshold = 1;
// Configuration for :ref:`zone aware routing
// <arch_overview_load_balancing_zone_aware_routing>`.
message ZoneAwareLbConfig {
// Configures percentage of requests that will be considered for zone aware routing
// if zone aware routing is configured. If not specified, the default is 100%.
// * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.
// * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.
envoy.type.Percent routing_enabled = 1;
// Configures minimum upstream cluster size required for zone aware routing.
// If upstream cluster size is less than specified, zone aware routing is not performed
// even if zone aware routing is configured. If not specified, the default is 6.
// * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.
// * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.
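// An illustrative sketch covering both fields (values are assumptions):
//
// .. code-block:: yaml
//
//   zone_aware_lb_config:
//     routing_enabled: { value: 98.0 }
//     min_cluster_size: 3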
google.protobuf.UInt64Value min_cluster_size = 2;
}
// Configuration for :ref:`locality weighted load balancing
// <arch_overview_load_balancing_locality_weighted_lb>`
message LocalityWeightedLbConfig {
}
oneof locality_config_specifier {
ZoneAwareLbConfig zone_aware_lb_config = 2;
LocalityWeightedLbConfig locality_weighted_lb_config = 3;
}
// If set, all health check/weight/metadata updates that happen within this duration will be
// merged and delivered in one shot when the duration expires. The start of the duration is when
// the first update happens. This is useful for big clusters, with potentially noisy deploys
// that might trigger excessive CPU usage due to a constant stream of healthcheck state changes
// or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new
// cluster).
//
// If this is not set, we default to a merge window of 1000ms. To disable it, set the merge
// window to 0.
//
// Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is
// because merging those updates isn't currently safe. See
// https://github.com/envoyproxy/envoy/pull/3941.
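// For example, merging could be disabled entirely with a zero window
// (a sketch, not a recommendation):
//
// .. code-block:: yaml
//
//   common_lb_config:
//     update_merge_window: 0s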
google.protobuf.Duration update_merge_window = 4;
// If set to true, Envoy will not consider new hosts when computing load balancing weights until
// they have been health checked for the first time. This will have no effect unless
// active health checking is also configured.
//
// Ignoring a host means that for any load balancing calculations that adjust weights based
// on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and
// panic mode) Envoy will exclude these hosts in the denominator.
//
// For example, with hosts in two priorities P0 and P1, where P0 looks like
// {healthy, unhealthy (new), unhealthy (new)}
// and where P1 looks like
// {healthy, healthy}
// all traffic will still hit P0, as 1 / (3 - 2) = 1.
//
// Enabling this will allow scaling up the number of hosts for a given cluster without entering
// panic mode or triggering priority spillover, assuming the hosts pass the first health check.
//
// If panic mode is triggered, new hosts are still eligible for traffic; they simply do not
// contribute to the calculation when deciding whether panic mode is enabled or not.
bool ignore_new_hosts_until_first_hc = 5;
}
// Common configuration for all load balancer implementations.
CommonLbConfig common_lb_config = 27;
// Optional custom transport socket implementation to use for upstream connections.
core.TransportSocket transport_socket = 24;
// The Metadata field can be used to provide additional information about the
// cluster. It can be used for stats, logging, and varying filter behavior.
// Fields should use reverse DNS notation to denote which entity within Envoy
// will need the information. For instance, if the metadata is intended for
// the Router filter, the filter name should be specified as *envoy.router*.
core.Metadata metadata = 25;
enum ClusterProtocolSelection {
// Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).
// If :ref:`http2_protocol_options <envoy_api_field_Cluster.http2_protocol_options>` are
// present, HTTP2 will be used, otherwise HTTP1.1 will be used.
USE_CONFIGURED_PROTOCOL = 0;
// Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.
USE_DOWNSTREAM_PROTOCOL = 1;
}
// Determines how Envoy selects the protocol used to speak to upstream hosts.
ClusterProtocolSelection protocol_selection = 26;
// Optional options for upstream connections.
envoy.api.v2.UpstreamConnectionOptions upstream_connection_options = 30;
// If an upstream host becomes unhealthy (as determined by the configured health checks
// or outlier detection), immediately close all connections to the failed host.
//
// .. note::
//
// This is currently only supported for connections created by tcp_proxy.
//
// .. note::
//
// The current implementation of this feature closes all connections immediately when
// the unhealthy status is detected. If there are a large number of connections open
// to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of
// time exclusively closing these connections, and not processing any other traffic.
bool close_connections_on_host_health_failure = 31;
// If this cluster uses EDS or STRICT_DNS to configure its hosts, immediately drain
// connections from any hosts that are removed from service discovery.
//
// This only affects behavior for hosts that are being actively health checked.
// If this flag is not set to true, Envoy will wait until a host fails active health
// checking before removing it from the cluster.
bool drain_connections_on_host_removal = 32;
}
// An extensible structure containing the address Envoy should bind to when
// establishing upstream connections.
message UpstreamBindConfig {
// The address Envoy should bind to when establishing upstream connections.
core.Address source_address = 1;
}
message UpstreamConnectionOptions {
// If set, SO_KEEPALIVE will be set on the socket to enable TCP keepalives.
core.TcpKeepalive tcp_keepalive = 1;
}

@ -1,70 +0,0 @@
syntax = "proto3";
package envoy.api.v2.cluster;
option java_outer_classname = "CircuitBreakerProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.api.v2.cluster";
option go_package = "cluster";
option csharp_namespace = "Envoy.Api.V2.ClusterNS";
option ruby_package = "Envoy.Api.V2.ClusterNS";
import "envoy/api/v2/core/base.proto";
import "google/protobuf/wrappers.proto";
import "gogoproto/gogo.proto";
option (gogoproto.equal_all) = true;
// [#protodoc-title: Circuit breakers]
// :ref:`Circuit breaking<arch_overview_circuit_break>` settings can be
// specified individually for each defined priority.
message CircuitBreakers {
// A Thresholds defines CircuitBreaker settings for a
// :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`.
message Thresholds {
// The :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`
// the specified CircuitBreaker settings apply to.
// [#comment:TODO(htuch): add (validate.rules).enum.defined_only = true once
// https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.]
core.RoutingPriority priority = 1;
// The maximum number of connections that Envoy will make to the upstream
// cluster. If not specified, the default is 1024.
google.protobuf.UInt32Value max_connections = 2;
// The maximum number of pending requests that Envoy will allow to the
// upstream cluster. If not specified, the default is 1024.
google.protobuf.UInt32Value max_pending_requests = 3;
// The maximum number of parallel requests that Envoy will make to the
// upstream cluster. If not specified, the default is 1024.
google.protobuf.UInt32Value max_requests = 4;
// The maximum number of parallel retries that Envoy will allow to the
// upstream cluster. If not specified, the default is 3.
google.protobuf.UInt32Value max_retries = 5;
// If track_remaining is true, then stats will be published that expose
// the number of resources remaining until the circuit breakers open. If
// not specified, the default is false.
bool track_remaining = 6;
// The maximum number of connection pools per cluster that Envoy will support
// concurrently. If not specified, the default is unlimited. Set this for clusters which create a
// large number of connection pools. See
// :ref:`Circuit Breaking <arch_overview_circuit_break_cluster_maximum_connection_pools>` for
// more details.
google.protobuf.UInt32Value max_connection_pools = 7;
}
// If multiple :ref:`Thresholds<envoy_api_msg_cluster.CircuitBreakers.Thresholds>`
// are defined with the same :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`,
// the first one in the list is used. If no Thresholds is defined for a given
// :ref:`RoutingPriority<envoy_api_enum_core.RoutingPriority>`, the default values
// are used.
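// A hypothetical example overriding limits for the HIGH priority only:
//
// .. code-block:: yaml
//
//   circuit_breakers:
//     thresholds:
//     - priority: HIGH
//       max_connections: 2048
//       max_retries: 5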
repeated Thresholds thresholds = 1;
}

@ -1,84 +0,0 @@
syntax = "proto3";
package envoy.api.v2.cluster;
option java_outer_classname = "OutlierDetectionProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.api.v2.cluster";
option csharp_namespace = "Envoy.Api.V2.ClusterNS";
option ruby_package = "Envoy.Api.V2.ClusterNS";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";
import "validate/validate.proto";
import "gogoproto/gogo.proto";
option (gogoproto.equal_all) = true;
// [#protodoc-title: Outlier detection]
// See the :ref:`architecture overview <arch_overview_outlier_detection>` for
// more information on outlier detection.
message OutlierDetection {
// The number of consecutive 5xx responses before a consecutive 5xx ejection
// occurs. Defaults to 5.
google.protobuf.UInt32Value consecutive_5xx = 1;
// The time interval between ejection analysis sweeps. This can result in
// both new ejections as well as hosts being returned to service. Defaults
// to 10000ms or 10s.
google.protobuf.Duration interval = 2 [(validate.rules).duration.gt = {}];
// The base time that a host is ejected for. The real time is equal to the
// base time multiplied by the number of times the host has been ejected.
// Defaults to 30000ms or 30s.
google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration.gt = {}];
// The maximum % of an upstream cluster that can be ejected due to outlier
// detection. Defaults to 10% but will eject at least one host regardless of the value.
google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32.lte = 100];
// The % chance that a host will actually be ejected when an outlier status
// is detected through consecutive 5xx. This setting can be used to disable
// ejection or to ramp it up slowly. Defaults to 100.
google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32.lte = 100];
// The % chance that a host will actually be ejected when an outlier status
// is detected through success rate statistics. This setting can be used to
// disable ejection or to ramp it up slowly. Defaults to 100.
google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32.lte = 100];
// The number of hosts in a cluster that must have enough request volume to
// detect success rate outliers. If the number of hosts is less than this
// setting, outlier detection via success rate statistics is not performed
// for any host in the cluster. Defaults to 5.
google.protobuf.UInt32Value success_rate_minimum_hosts = 7;
// The minimum number of total requests that must be collected in one
// interval (as defined by the interval duration above) to include this host
// in success rate based outlier detection. If the volume is lower than this
// setting, outlier detection via success rate statistics is not performed
// for that host. Defaults to 100.
google.protobuf.UInt32Value success_rate_request_volume = 8;
// This factor is used to determine the ejection threshold for success rate
// outlier ejection. The ejection threshold is the difference between the
// mean success rate, and the product of this factor and the standard
// deviation of the mean success rate: mean - (stdev *
// success_rate_stdev_factor). This factor is divided by a thousand to get a
// double. That is, if the desired factor is 1.9, the runtime value should
// be 1900. Defaults to 1900.
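// As a worked example with assumed numbers: with a mean success rate of
// 95%, a standard deviation of 2%, and the default factor of 1900 (i.e.
// 1.9), the ejection threshold is 95 - (2 * 1.9) = 91.2%; hosts whose
// success rate falls below that are candidates for ejection.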
google.protobuf.UInt32Value success_rate_stdev_factor = 9;
// The number of consecutive gateway failures (502, 503, 504 status or
// connection errors that are mapped to one of those status codes) before a
// consecutive gateway failure ejection occurs. Defaults to 5.
google.protobuf.UInt32Value consecutive_gateway_failure = 10;
// The % chance that a host will actually be ejected when an outlier status
// is detected through consecutive gateway failures. This setting can be
// used to disable ejection or to ramp it up slowly. Defaults to 0.
google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11
[(validate.rules).uint32.lte = 100];
}

@ -83,8 +83,7 @@ message TcpKeepalive {
message BindConfig {
// The address to bind to when creating a socket.
SocketAddress source_address = 1
[(validate.rules).message.required = true, (gogoproto.nullable) = false];
SocketAddress source_address = 1 [(validate.rules).message.required = true];
// Whether to set the *IP_FREEBIND* option when creating the socket. When this
// flag is set to true, allows the :ref:`source_address

@ -7,6 +7,8 @@ option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.api.v2.core";
option go_package = "core";
import "envoy/api/v2/core/http_uri.proto";
import "google/protobuf/any.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";
@ -187,6 +189,28 @@ message DataSource {
}
}
// This message specifies how to fetch data from a remote source and how to verify it.
message RemoteDataSource {
// The HTTP URI to fetch the remote data.
HttpUri http_uri = 1 [(validate.rules).message.required = true];
// SHA256 string for verifying data.
string sha256 = 2 [(validate.rules).string.min_bytes = 1];
}
// An async data source which supports asynchronous data fetching.
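// A minimal sketch of the remote variant (URI, cluster name and digest are
// placeholders):
//
// .. code-block:: yaml
//
//   remote:
//     http_uri: { uri: https://example.com/data.json, cluster: data_cluster, timeout: 1s }
//     sha256: 0123456789abcdef...  # hypothetical digest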
message AsyncDataSource {
oneof specifier {
option (validate.required) = true;
// Local async data source.
DataSource local = 1;
// Remote async data source.
RemoteDataSource remote = 2;
}
}
// Configuration for transport socket in :ref:`listeners <config_listeners>` and
// :ref:`clusters <envoy_api_msg_Cluster>`. If the configuration is
// empty, a default transport socket implementation and configuration will be
@ -234,8 +258,7 @@ message SocketOption {
}
// The state in which the option will be applied. When used in BindConfig
// STATE_PREBIND is currently the only valid value.
SocketState state = 6
[(validate.rules).message.required = true, (validate.rules).enum.defined_only = true];
SocketState state = 6 [(validate.rules).enum.defined_only = true];
}
// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not
@ -255,3 +278,15 @@ message ControlPlane {
// the Envoy is connected to.
string identifier = 1;
}
// Identifies the direction of the traffic relative to the local Envoy.
enum TrafficDirection {
// Default option is unspecified.
UNSPECIFIED = 0;
// The transport is used for incoming traffic.
INBOUND = 1;
// The transport is used for outgoing traffic.
OUTBOUND = 2;
}

@ -1,124 +0,0 @@
syntax = "proto3";
package envoy.api.v2.core;
option java_outer_classname = "ConfigSourceProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.api.v2.core";
import "envoy/api/v2/core/grpc_service.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";
import "validate/validate.proto";
import "gogoproto/gogo.proto";
option (gogoproto.equal_all) = true;
// [#protodoc-title: Configuration sources]
// API configuration source. This identifies the API type and cluster that Envoy
// will use to fetch an xDS API.
message ApiConfigSource {
// APIs may be fetched via either REST or gRPC.
enum ApiType {
// Ideally this would be 'reserved 0' but one can't reserve the default
// value. Instead we throw an exception if this is ever used.
UNSUPPORTED_REST_LEGACY = 0 [deprecated = true];
// REST-JSON v2 API. The `canonical JSON encoding
// <https://developers.google.com/protocol-buffers/docs/proto3#json>`_ for
// the v2 protos is used.
REST = 1;
// gRPC v2 API.
GRPC = 2;
// Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response}
// rather than Discovery{Request,Response}. Rather than sending Envoy the entire state
// with every update, the xDS server only sends what has changed since the last update.
//
// DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available.
// Do not use for other xDSes. TODO(fredlas) update/remove this warning when appropriate.
DELTA_GRPC = 3;
}
ApiType api_type = 1 [(validate.rules).enum.defined_only = true];
// Cluster names should be used only with REST. If > 1
// cluster is defined, clusters will be cycled through if any kind of failure
// occurs.
//
// .. note::
//
// The cluster with name ``cluster_name`` must be statically defined and its
// type must not be ``EDS``.
repeated string cluster_names = 2;
// Multiple gRPC services can be provided for GRPC. If > 1 cluster is defined,
// services will be cycled through if any kind of failure occurs.
repeated GrpcService grpc_services = 4;
// For REST APIs, the delay between successive polls.
google.protobuf.Duration refresh_delay = 3 [(gogoproto.stdduration) = true];
// For REST APIs, the request timeout. If not set, a default value of 1s will be used.
google.protobuf.Duration request_timeout = 5
[(validate.rules).duration.gt.seconds = 0, (gogoproto.stdduration) = true];
// For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be
// rate limited.
RateLimitSettings rate_limit_settings = 6;
}
// Aggregated Discovery Service (ADS) options. This is currently empty, but when
// set in :ref:`ConfigSource <envoy_api_msg_core.ConfigSource>` can be used to
// specify that ADS is to be used.
message AggregatedConfigSource {
}
// Rate Limit settings to be applied for discovery requests made by Envoy.
message RateLimitSettings {
// Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a
// default value of 100 will be used.
google.protobuf.UInt32Value max_tokens = 1;
// Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens
// per second will be used.
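// Together these form a token bucket; e.g. with the assumed values below,
// discovery requests could burst up to 200 and sustain 20 per second:
//
// .. code-block:: yaml
//
//   rate_limit_settings:
//     max_tokens: 200
//     fill_rate: 20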
google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double.gt = 0.0];
}
// Configuration for :ref:`listeners <config_listeners>`, :ref:`clusters
// <config_cluster_manager>`, :ref:`routes
// <envoy_api_msg_RouteConfiguration>`, :ref:`endpoints
// <arch_overview_service_discovery>` etc. may either be sourced from the
// filesystem or from an xDS API source. Filesystem configs are watched with
// inotify for updates.
message ConfigSource {
oneof config_source_specifier {
option (validate.required) = true;
// Path on the filesystem to source and watch for configuration updates.
//
// .. note::
//
// The path to the source must exist at config load time.
//
// .. note::
//
// Envoy will only watch the file path for *moves.* This is because in general only moves
// are atomic. The same method of swapping files as is demonstrated in the
// :ref:`runtime documentation <config_runtime_symbolic_link_swap>` can be used here also.
string path = 1;
// API configuration source.
ApiConfigSource api_config_source = 2;
// When set, ADS will be used to fetch resources. The ADS API configuration
// source in the bootstrap configuration is used.
AggregatedConfigSource ads = 3;
}
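// For example, a sketch of selecting a gRPC API config source (the cluster
// name ``xds_cluster`` is an assumed placeholder):
//
// .. code-block:: yaml
//
//   api_config_source:
//     api_type: GRPC
//     grpc_services:
//     - envoy_grpc: { cluster_name: xds_cluster }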
// Optional initialization timeout.
// When this timeout is specified, Envoy will wait no longer than the specified time for first
// config response on this xDS subscription during the :ref:`initialization process
// <arch_overview_initialization>`. After reaching the timeout, Envoy will move to the next
// initialization phase, even if the first config is not delivered yet. The timer is activated
// when the xDS API subscription starts, and is disarmed on first config update or on error. 0
// means no timeout - Envoy will wait indefinitely for the first xDS config (unless another
// timeout applies). Default 0.
google.protobuf.Duration initial_fetch_timeout = 4;
}

@ -1,173 +0,0 @@
syntax = "proto3";
package envoy.api.v2.core;
option java_outer_classname = "GrpcServiceProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.api.v2.core";
import "envoy/api/v2/core/base.proto";
import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/empty.proto";
import "validate/validate.proto";
import "gogoproto/gogo.proto";
option (gogoproto.equal_all) = true;
// [#protodoc-title: gRPC services]
// gRPC service configuration. This is used by :ref:`ApiConfigSource
// <envoy_api_msg_core.ApiConfigSource>` and filter configurations.
message GrpcService {
message EnvoyGrpc {
// The name of the upstream gRPC cluster. SSL credentials will be supplied
// in the :ref:`Cluster <envoy_api_msg_Cluster>` :ref:`tls_context
// <envoy_api_field_Cluster.tls_context>`.
string cluster_name = 1 [(validate.rules).string.min_bytes = 1];
}
// [#proto-status: draft]
message GoogleGrpc {
// The target URI when using the `Google C++ gRPC client
// <https://github.com/grpc/grpc>`_. SSL credentials will be supplied in
// :ref:`channel_credentials <envoy_api_field_core.GrpcService.GoogleGrpc.channel_credentials>`.
string target_uri = 1 [(validate.rules).string.min_bytes = 1];
// See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html.
message SslCredentials {
// PEM encoded server root certificates.
DataSource root_certs = 1;
// PEM encoded client private key.
DataSource private_key = 2;
// PEM encoded client certificate chain.
DataSource cert_chain = 3;
}
// Local channel credentials. Only UDS is supported for now.
// See https://github.com/grpc/grpc/pull/15909.
message GoogleLocalCredentials {
}
// See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call
// credential types.
message ChannelCredentials {
oneof credential_specifier {
option (validate.required) = true;
SslCredentials ssl_credentials = 1;
// https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61
google.protobuf.Empty google_default = 2;
GoogleLocalCredentials local_credentials = 3;
}
}
ChannelCredentials channel_credentials = 2;
message CallCredentials {
message ServiceAccountJWTAccessCredentials {
string json_key = 1;
uint64 token_lifetime_seconds = 2;
}
message GoogleIAMCredentials {
string authorization_token = 1;
string authority_selector = 2;
}
message MetadataCredentialsFromPlugin {
string name = 1;
oneof config_type {
google.protobuf.Struct config = 2;
google.protobuf.Any typed_config = 3;
}
}
oneof credential_specifier {
option (validate.required) = true;
// Access token credentials.
// https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d.
string access_token = 1;
// Google Compute Engine credentials.
// https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61
google.protobuf.Empty google_compute_engine = 2;
// Google refresh token credentials.
// https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c.
string google_refresh_token = 3;
// Service Account JWT Access credentials.
// https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa.
ServiceAccountJWTAccessCredentials service_account_jwt_access = 4;
// Google IAM credentials.
// https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0.
GoogleIAMCredentials google_iam = 5;
// Custom authenticator credentials.
// https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.
// https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.
MetadataCredentialsFromPlugin from_plugin = 6;
}
}
// A set of call credentials that can be composed with `channel credentials
// <https://grpc.io/docs/guides/auth.html#credential-types>`_.
repeated CallCredentials call_credentials = 3;
// The human readable prefix to use when emitting statistics for the gRPC
// service.
//
// .. csv-table::
// :header: Name, Type, Description
// :widths: 1, 1, 2
//
// streams_total, Counter, Total number of streams opened
// streams_closed_<gRPC status code>, Counter, Total streams closed with <gRPC status code>
string stat_prefix = 4 [(validate.rules).string.min_bytes = 1];
// The name of the Google gRPC credentials factory to use. This must have been registered with
// Envoy. If this is empty, a default credentials factory will be used that sets up channel
// credentials based on other configuration parameters.
string credentials_factory_name = 5;
// Additional configuration for site-specific customizations of the Google
// gRPC library.
google.protobuf.Struct config = 6;
}
oneof target_specifier {
option (validate.required) = true;
// Envoy's in-built gRPC client.
// See the :ref:`gRPC services overview <arch_overview_grpc_services>`
// documentation for discussion on gRPC client selection.
EnvoyGrpc envoy_grpc = 1;
// `Google C++ gRPC client <https://github.com/grpc/grpc>`_
// See the :ref:`gRPC services overview <arch_overview_grpc_services>`
// documentation for discussion on gRPC client selection.
GoogleGrpc google_grpc = 2;
}
// The timeout for the gRPC request. This is the timeout for a specific
// request.
google.protobuf.Duration timeout = 3;
// Field 4 reserved due to moving credentials inside the GoogleGrpc message
reserved 4;
// Additional metadata to include in streams initiated to the GrpcService.
// This can be used for scenarios in which additional ad hoc authorization
// headers (e.g. `x-foo-bar: baz-key`) are to be injected.
repeated HeaderValue initial_metadata = 5;
}

@ -0,0 +1,54 @@
syntax = "proto3";
package envoy.api.v2.core;
option java_outer_classname = "HttpUriProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.api.v2.core";
import "google/protobuf/duration.proto";
import "gogoproto/gogo.proto";
import "validate/validate.proto";
option (gogoproto.equal_all) = true;
// [#protodoc-title: HTTP Service URI]
// Envoy external URI descriptor
message HttpUri {
// The HTTP server URI. It should be a full FQDN with protocol, host and path.
//
// Example:
//
// .. code-block:: yaml
//
// uri: https://www.googleapis.com/oauth2/v1/certs
//
string uri = 1 [(validate.rules).string.min_bytes = 1];
// Specify how `uri` is to be fetched. Today, this requires an explicit
// cluster, but in the future we may support dynamic cluster creation or
// inline DNS resolution. See `issue
// <https://github.com/envoyproxy/envoy/issues/1606>`_.
oneof http_upstream_type {
option (validate.required) = true;
// A cluster is created in the Envoy "cluster_manager" config
// section. This field specifies the cluster name.
//
// Example:
//
// .. code-block:: yaml
//
// cluster: jwks_cluster
//
string cluster = 2 [(validate.rules).string.min_bytes = 1];
}
// Sets the maximum duration in milliseconds that a response can take to arrive upon request.
google.protobuf.Duration timeout = 3 [
(validate.rules).duration.gte = {},
(validate.rules).duration.required = true,
(gogoproto.stdduration) = true
];
}

@ -1,99 +0,0 @@
syntax = "proto3";
package envoy.api.v2.core;
option java_outer_classname = "ProtocolProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.api.v2.core";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";
import "validate/validate.proto";
import "gogoproto/gogo.proto";
option (gogoproto.equal_all) = true;
// [#protodoc-title: Protocol options]
// [#not-implemented-hide:]
message TcpProtocolOptions {
}
message HttpProtocolOptions {
// The idle timeout for upstream connection pool connections. The idle timeout is defined as the
// period in which there are no active requests. If not set, there is no idle timeout. When the
// idle timeout is reached the connection will be closed. Note that request based timeouts mean
// that HTTP/2 PINGs will not keep the connection alive.
google.protobuf.Duration idle_timeout = 1 [(gogoproto.stdduration) = true];
}
message Http1ProtocolOptions {
// Handle HTTP requests with absolute URLs in the requests. These requests
// are generally sent by clients to forward/explicit proxies. This allows clients to configure
// envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the
// *http_proxy* environment variable.
google.protobuf.BoolValue allow_absolute_url = 1;
// Handle incoming HTTP/1.0 and HTTP 0.9 requests.
// This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1
// style connect logic, dechunking, and handling lack of client host iff
// *default_host_for_http_10* is configured.
bool accept_http_10 = 2;
// A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as
// Envoy does not otherwise support HTTP/1.0 without a Host header.
// This is a no-op if *accept_http_10* is not true.
string default_host_for_http_10 = 3;
}
message Http2ProtocolOptions {
// `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_
// (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values
// range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header
// compression.
google.protobuf.UInt32Value hpack_table_size = 1;
// `Maximum concurrent streams <https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2>`_
// allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1)
// and defaults to 2147483647.
google.protobuf.UInt32Value max_concurrent_streams = 2
[(validate.rules).uint32 = {gte: 1, lte: 2147483647}];
// `Initial stream-level flow-control window
// <https://httpwg.org/specs/rfc7540.html#rfc.section.6.9.2>`_ size. Valid values range from 65535
// (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456
// (256 * 1024 * 1024).
//
// NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default
// window size now, so it's also the minimum.
// This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the
// HTTP/2 codec buffers. Once the buffer reaches this limit, watermark callbacks will fire to
// stop the flow of data to the codec buffers.
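// An illustrative snippet pinning the window to the HTTP/2 default, which
// is also the minimum accepted here:
//
// .. code-block:: yaml
//
//   http2_protocol_options:
//     initial_stream_window_size: 65535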
google.protobuf.UInt32Value initial_stream_window_size = 3
[(validate.rules).uint32 = {gte: 65535, lte: 2147483647}];
// Similar to *initial_stream_window_size*, but for connection-level flow-control
// window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*.
google.protobuf.UInt32Value initial_connection_window_size = 4
[(validate.rules).uint32 = {gte: 65535, lte: 2147483647}];
// Allows proxying Websocket and other upgrades over H2 connect.
bool allow_connect = 5;
// [#not-implemented-hide:] Hiding until envoy has full metadata support.
// Still under implementation. DO NOT USE.
//
// Allows metadata. See [metadata
// docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more
// information.
bool allow_metadata = 6;
}
// [#not-implemented-hide:]
message GrpcProtocolOptions {
Http2ProtocolOptions http2_protocol_options = 1;
}

@ -35,10 +35,10 @@ message DiscoveryRequest {
// List of resources to subscribe to, e.g. list of cluster names or a route
// configuration name. If this is empty, all resources for the API are
// returned. LDS/CDS expect empty resource_names, since this is global
// discovery for the Envoy instance. The LDS and CDS responses will then imply
// a number of resources that need to be fetched via EDS/RDS, which will be
// explicitly enumerated in resource_names.
// returned. LDS/CDS may have empty resource_names, which will cause all
// resources for the Envoy instance to be returned. The LDS and CDS responses
// will then imply a number of resources that need to be fetched via EDS/RDS,
// which will be explicitly enumerated in resource_names.
repeated string resource_names = 3;
// Type of the resource that is being requested, e.g.
@ -65,7 +65,7 @@ message DiscoveryResponse {
string version_info = 1;
// The response resources. These resources are typed and depend on the API being called.
repeated google.protobuf.Any resources = 2 [(gogoproto.nullable) = false];
repeated google.protobuf.Any resources = 2;
// [#not-implemented-hide:]
// Canary is used to support two Envoy command line flags:
@ -196,7 +196,7 @@ message DeltaDiscoveryResponse {
// The response resources. These are typed resources, whose types must match
// the type_url field.
repeated Resource resources = 2 [(gogoproto.nullable) = false];
repeated Resource resources = 2;
// field id 3 IS available!

@ -59,7 +59,7 @@ message ClusterLoadAssignment {
string cluster_name = 1 [(validate.rules).string.min_bytes = 1];
// List of endpoints to load balance to.
repeated endpoint.LocalityLbEndpoints endpoints = 2 [(gogoproto.nullable) = false];
repeated endpoint.LocalityLbEndpoints endpoints = 2;
// Map of named endpoints that can be referenced in LocalityLbEndpoints.
map<string, endpoint.Endpoint> named_endpoints = 5;

@ -74,22 +74,15 @@ message LbEndpoint {
// to subset the endpoints considered in cluster load balancing.
core.Metadata metadata = 3;
// The optional load balancing weight of the upstream host, in the range 1 -
// 128. Envoy uses the load balancing weight in some of the built in load
// The optional load balancing weight of the upstream host; at least 1.
// Envoy uses the load balancing weight in some of the built in load
// balancers. The load balancing weight for an endpoint is divided by the sum
// of the weights of all endpoints in the endpoint's locality to produce a
// percentage of traffic for the endpoint. This percentage is then further
// weighted by the endpoint's locality's load balancing weight from
// LocalityLbEndpoints. If unspecified, each host is presumed to have equal
// weight in a locality.
//
// .. attention::
//
// The limit of 128 is somewhat arbitrary, but is applied due to performance
// concerns with the current implementation and can be removed when
// `this issue <https://github.com/envoyproxy/envoy/issues/1285>`_ is fixed.
google.protobuf.UInt32Value load_balancing_weight = 4
[(validate.rules).uint32 = {gte: 1, lte: 128}];
google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}];
}
// A group of endpoints belonging to a Locality.
@ -101,9 +94,9 @@ message LocalityLbEndpoints {
core.Locality locality = 1;
// The group of endpoints belonging to the locality specified.
repeated LbEndpoint lb_endpoints = 2 [(gogoproto.nullable) = false];
repeated LbEndpoint lb_endpoints = 2;
// Optional: Per priority/region/zone/sub_zone weight - range 1-128. The load
// Optional: Per priority/region/zone/sub_zone weight; at least 1. The load
// balancing weight for a locality is divided by the sum of the weights of all
// localities at the same priority level to produce the effective percentage
// of traffic for the locality.
@ -113,14 +106,7 @@ message LocalityLbEndpoints {
// configured. These weights are ignored otherwise. If no weights are
// specified when locality weighted load balancing is enabled, the locality is
// assigned no load.
//
// .. attention::
//
// The limit of 128 is somewhat arbitrary, but is applied due to performance
// concerns with the current implementation and can be removed when
// `this issue <https://github.com/envoyproxy/envoy/issues/1285>`_ is fixed.
google.protobuf.UInt32Value load_balancing_weight = 3
[(validate.rules).uint32 = {gte: 1, lte: 128}];
google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}];
// Optional: the priority for this LocalityLbEndpoints. If unspecified this will
// default to the highest priority (0).

xds/third_party/udpa/LICENSE vendored Normal file
@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

xds/third_party/udpa/import.sh vendored Executable file
@ -0,0 +1,54 @@
#!/bin/bash
# Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Update VERSION then in this directory run ./import.sh
set -e
BRANCH=master
# import VERSION from one of the google internal CLs
VERSION=94324803a497c8f76dbc78df393ef629d3a9f3c3
GIT_REPO="https://github.com/cncf/udpa.git"
GIT_BASE_DIR=udpa
SOURCE_PROTO_BASE_DIR=udpa
TARGET_PROTO_BASE_DIR=src/main/proto
FILES=(
udpa/data/orca/v1/orca_load_report.proto
udpa/service/orca/v1/orca.proto
)
# clone the udpa github repo in a tmp directory
tmpdir="$(mktemp -d)"
pushd "${tmpdir}"
rm -rf $GIT_BASE_DIR
git clone -b $BRANCH $GIT_REPO
cd "$GIT_BASE_DIR"
git checkout $VERSION
popd
cp -p "${tmpdir}/${GIT_BASE_DIR}/LICENSE" LICENSE
rm -rf "${TARGET_PROTO_BASE_DIR}"
mkdir -p "${TARGET_PROTO_BASE_DIR}"
pushd "${TARGET_PROTO_BASE_DIR}"
# copy proto files to project directory
for file in "${FILES[@]}"
do
mkdir -p "$(dirname "${file}")"
cp -p "${tmpdir}/${SOURCE_PROTO_BASE_DIR}/${file}" "${file}"
done
popd
rm -rf "$tmpdir"

@ -4,8 +4,7 @@ package udpa.data.orca.v1;
option java_outer_classname = "OrcaLoadReportProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.udpa.data.orca.v1";
option go_package = "v1";
option java_package = "com.github.udpa.udpa.data.orca.v1";
import "validate/validate.proto";

@ -4,8 +4,7 @@ package udpa.service.orca.v1;
option java_outer_classname = "OrcaProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.udpa.service.orca.v1";
option go_package = "v1";
option java_package = "com.github.udpa.udpa.service.orca.v1";
import "udpa/data/orca/v1/orca_load_report.proto";