public static final class PredictionServiceGrpc.PredictionServiceStub extends AbstractAsyncStub<PredictionServiceGrpc.PredictionServiceStub>
A stub to allow clients to do asynchronous rpc calls to service PredictionService.
A service for online predictions and explanations.
Inheritance
java.lang.Object >
io.grpc.stub.AbstractStub >
io.grpc.stub.AbstractAsyncStub >
PredictionServiceGrpc.PredictionServiceStub
Inherited Members
io.grpc.stub.AbstractAsyncStub.<T>newStub(io.grpc.stub.AbstractStub.StubFactory<T>,io.grpc.Channel)
io.grpc.stub.AbstractAsyncStub.<T>newStub(io.grpc.stub.AbstractStub.StubFactory<T>,io.grpc.Channel,io.grpc.CallOptions)
io.grpc.stub.AbstractStub.<T>withOption(io.grpc.CallOptions.Key<T>,T)
io.grpc.stub.AbstractStub.build(io.grpc.Channel,io.grpc.CallOptions)
io.grpc.stub.AbstractStub.getCallOptions()
io.grpc.stub.AbstractStub.getChannel()
io.grpc.stub.AbstractStub.withCallCredentials(io.grpc.CallCredentials)
io.grpc.stub.AbstractStub.withChannel(io.grpc.Channel)
io.grpc.stub.AbstractStub.withCompression(java.lang.String)
io.grpc.stub.AbstractStub.withDeadline(io.grpc.Deadline)
io.grpc.stub.AbstractStub.withDeadlineAfter(long,java.util.concurrent.TimeUnit)
io.grpc.stub.AbstractStub.withExecutor(java.util.concurrent.Executor)
io.grpc.stub.AbstractStub.withInterceptors(io.grpc.ClientInterceptor...)
io.grpc.stub.AbstractStub.withMaxInboundMessageSize(int)
io.grpc.stub.AbstractStub.withMaxOutboundMessageSize(int)
io.grpc.stub.AbstractStub.withWaitForReady()
Methods
protected PredictionServiceGrpc.PredictionServiceStub build(Channel channel, CallOptions callOptions)
Parameters |
Name |
Description |
channel |
io.grpc.Channel
|
callOptions |
io.grpc.CallOptions
|
Overrides
io.grpc.stub.AbstractStub.build(io.grpc.Channel,io.grpc.CallOptions)
chatCompletions(ChatCompletionsRequest request, StreamObserver<HttpBody> responseObserver)
public void chatCompletions(ChatCompletionsRequest request, StreamObserver<HttpBody> responseObserver)
Exposes an OpenAI-compatible endpoint for chat completions.
Parameters |
Name |
Description |
request |
ChatCompletionsRequest
|
responseObserver |
io.grpc.stub.StreamObserver<com.google.api.HttpBody>
|
public void countTokens(CountTokensRequest request, StreamObserver<CountTokensResponse> responseObserver)
Perform a token counting.
public void directPredict(DirectPredictRequest request, StreamObserver<DirectPredictResponse> responseObserver)
Perform a unary online prediction request to a gRPC model server for
Vertex first-party products and frameworks.
public void directRawPredict(DirectRawPredictRequest request, StreamObserver<DirectRawPredictResponse> responseObserver)
Perform a unary online prediction request to a gRPC model server for
custom containers.
public void explain(ExplainRequest request, StreamObserver<ExplainResponse> responseObserver)
Perform an online explanation.
If deployed_model_id is specified, the corresponding DeployedModel must have
explanation_spec populated. If deployed_model_id is not specified, all
DeployedModels must have explanation_spec populated.
generateContent(GenerateContentRequest request, StreamObserver<GenerateContentResponse> responseObserver)
public void generateContent(GenerateContentRequest request, StreamObserver<GenerateContentResponse> responseObserver)
Generate content with multimodal inputs.
public void predict(PredictRequest request, StreamObserver<PredictResponse> responseObserver)
Perform an online prediction.
rawPredict(RawPredictRequest request, StreamObserver<HttpBody> responseObserver)
public void rawPredict(RawPredictRequest request, StreamObserver<HttpBody> responseObserver)
Perform an online prediction with an arbitrary HTTP payload.
The response includes the following HTTP headers:
X-Vertex-AI-Endpoint-Id
: ID of the
Endpoint that served this
prediction.
X-Vertex-AI-Deployed-Model-Id
: ID of the Endpoint's
DeployedModel that served
this prediction.
Parameters |
Name |
Description |
request |
RawPredictRequest
|
responseObserver |
io.grpc.stub.StreamObserver<com.google.api.HttpBody>
|
public void serverStreamingPredict(StreamingPredictRequest request, StreamObserver<StreamingPredictResponse> responseObserver)
Perform a server-side streaming online prediction request for Vertex
LLM streaming.
public StreamObserver<StreamDirectPredictRequest> streamDirectPredict(StreamObserver<StreamDirectPredictResponse> responseObserver)
Perform a streaming online prediction request to a gRPC model server for
Vertex first-party products and frameworks.
public StreamObserver<StreamDirectRawPredictRequest> streamDirectRawPredict(StreamObserver<StreamDirectRawPredictResponse> responseObserver)
Perform a streaming online prediction request to a gRPC model server for
custom containers.
streamGenerateContent(GenerateContentRequest request, StreamObserver<GenerateContentResponse> responseObserver)
public void streamGenerateContent(GenerateContentRequest request, StreamObserver<GenerateContentResponse> responseObserver)
Generate content with multimodal inputs with streaming support.
streamRawPredict(StreamRawPredictRequest request, StreamObserver<HttpBody> responseObserver)
public void streamRawPredict(StreamRawPredictRequest request, StreamObserver<HttpBody> responseObserver)
Perform a streaming online prediction with an arbitrary HTTP payload.
Parameters |
Name |
Description |
request |
StreamRawPredictRequest
|
responseObserver |
io.grpc.stub.StreamObserver<com.google.api.HttpBody>
|
public StreamObserver<StreamingPredictRequest> streamingPredict(StreamObserver<StreamingPredictResponse> responseObserver)
Perform a streaming online prediction request for Vertex first-party
products and frameworks.
public StreamObserver<StreamingRawPredictRequest> streamingRawPredict(StreamObserver<StreamingRawPredictResponse> responseObserver)
Perform a streaming online prediction request through gRPC.