Class BigQueryDestination (3.51.0)

public final class BigQueryDestination extends GeneratedMessageV3 implements BigQueryDestinationOrBuilder

A BigQuery destination for exporting assets to.

Protobuf type google.cloud.asset.v1.BigQueryDestination
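
As a quick orientation, the minimal sketch below builds a BigQueryDestination with the fields documented on this page; the project, dataset, and table names are placeholders, not values from this reference.

```java
import com.google.cloud.asset.v1.BigQueryDestination;
import com.google.cloud.asset.v1.PartitionSpec;

public class BigQueryDestinationExample {
  public static void main(String[] args) {
    // Illustrative values only; substitute your own project, dataset, and table.
    BigQueryDestination destination =
        BigQueryDestination.newBuilder()
            .setDataset("projects/my-project/datasets/my_dataset")
            .setTable("my_table")
            .setForce(true) // overwrite the destination table if it already exists
            .setPartitionSpec(
                PartitionSpec.newBuilder()
                    .setPartitionKey(PartitionSpec.PartitionKey.READ_TIME)
                    .build())
            .setSeparateTablesPerAssetType(false)
            .build();

    System.out.println(destination);
  }
}
```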

Static Fields

DATASET_FIELD_NUMBER

public static final int DATASET_FIELD_NUMBER
Field Value
Type Description
int

FORCE_FIELD_NUMBER

public static final int FORCE_FIELD_NUMBER
Field Value
Type Description
int

PARTITION_SPEC_FIELD_NUMBER

public static final int PARTITION_SPEC_FIELD_NUMBER
Field Value
Type Description
int

SEPARATE_TABLES_PER_ASSET_TYPE_FIELD_NUMBER

public static final int SEPARATE_TABLES_PER_ASSET_TYPE_FIELD_NUMBER
Field Value
Type Description
int

TABLE_FIELD_NUMBER

public static final int TABLE_FIELD_NUMBER
Field Value
Type Description
int

Static Methods

getDefaultInstance()

public static BigQueryDestination getDefaultInstance()
Returns
Type Description
BigQueryDestination

getDescriptor()

public static final Descriptors.Descriptor getDescriptor()
Returns
Type Description
Descriptor

newBuilder()

public static BigQueryDestination.Builder newBuilder()
Returns
Type Description
BigQueryDestination.Builder

newBuilder(BigQueryDestination prototype)

public static BigQueryDestination.Builder newBuilder(BigQueryDestination prototype)
Parameter
Name Description
prototype BigQueryDestination
Returns
Type Description
BigQueryDestination.Builder

parseDelimitedFrom(InputStream input)

public static BigQueryDestination parseDelimitedFrom(InputStream input)
Parameter
Name Description
input InputStream
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
IOException

parseDelimitedFrom(InputStream input, ExtensionRegistryLite extensionRegistry)

public static BigQueryDestination parseDelimitedFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
Parameters
Name Description
input InputStream
extensionRegistry ExtensionRegistryLite
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
IOException

parseFrom(byte[] data)

public static BigQueryDestination parseFrom(byte[] data)
Parameter
Name Description
data byte[]
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
InvalidProtocolBufferException
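
The parseFrom overloads follow the standard protobuf parsing surface. As a small sketch (field values are placeholders), the following round-trips a message through its serialized bytes:

```java
import com.google.cloud.asset.v1.BigQueryDestination;
import com.google.protobuf.InvalidProtocolBufferException;

public class ParseFromExample {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    BigQueryDestination original =
        BigQueryDestination.newBuilder()
            .setDataset("projects/my-project/datasets/my_dataset") // placeholder
            .setTable("my_table")                                  // placeholder
            .build();

    // Serialize to bytes and parse back; parseFrom throws
    // InvalidProtocolBufferException on malformed input.
    byte[] bytes = original.toByteArray();
    BigQueryDestination parsed = BigQueryDestination.parseFrom(bytes);

    System.out.println(parsed.equals(original)); // true
  }
}
```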

parseFrom(byte[] data, ExtensionRegistryLite extensionRegistry)

public static BigQueryDestination parseFrom(byte[] data, ExtensionRegistryLite extensionRegistry)
Parameters
Name Description
data byte[]
extensionRegistry ExtensionRegistryLite
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
InvalidProtocolBufferException

parseFrom(ByteString data)

public static BigQueryDestination parseFrom(ByteString data)
Parameter
Name Description
data ByteString
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
InvalidProtocolBufferException

parseFrom(ByteString data, ExtensionRegistryLite extensionRegistry)

public static BigQueryDestination parseFrom(ByteString data, ExtensionRegistryLite extensionRegistry)
Parameters
Name Description
data ByteString
extensionRegistry ExtensionRegistryLite
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
InvalidProtocolBufferException

parseFrom(CodedInputStream input)

public static BigQueryDestination parseFrom(CodedInputStream input)
Parameter
Name Description
input CodedInputStream
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
IOException

parseFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)

public static BigQueryDestination parseFrom(CodedInputStream input, ExtensionRegistryLite extensionRegistry)
Parameters
Name Description
input CodedInputStream
extensionRegistry ExtensionRegistryLite
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
IOException

parseFrom(InputStream input)

public static BigQueryDestination parseFrom(InputStream input)
Parameter
Name Description
input InputStream
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
IOException

parseFrom(InputStream input, ExtensionRegistryLite extensionRegistry)

public static BigQueryDestination parseFrom(InputStream input, ExtensionRegistryLite extensionRegistry)
Parameters
Name Description
input InputStream
extensionRegistry ExtensionRegistryLite
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
IOException

parseFrom(ByteBuffer data)

public static BigQueryDestination parseFrom(ByteBuffer data)
Parameter
Name Description
data ByteBuffer
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
InvalidProtocolBufferException

parseFrom(ByteBuffer data, ExtensionRegistryLite extensionRegistry)

public static BigQueryDestination parseFrom(ByteBuffer data, ExtensionRegistryLite extensionRegistry)
Parameters
Name Description
data ByteBuffer
extensionRegistry ExtensionRegistryLite
Returns
Type Description
BigQueryDestination
Exceptions
Type Description
InvalidProtocolBufferException

parser()

public static Parser<BigQueryDestination> parser()
Returns
Type Description
Parser<BigQueryDestination>
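
parser() exposes the message's static Parser, which is handy for generic code that only knows Parser<T>. A brief sketch, with a placeholder dataset name:

```java
import com.google.cloud.asset.v1.BigQueryDestination;
import com.google.protobuf.Parser;

public class ParserExample {
  public static void main(String[] args) throws Exception {
    byte[] bytes =
        BigQueryDestination.newBuilder()
            .setDataset("projects/my-project/datasets/my_dataset") // placeholder
            .build()
            .toByteArray();

    // The static parser can be handed to code that works with any message type.
    Parser<BigQueryDestination> parser = BigQueryDestination.parser();
    BigQueryDestination parsed = parser.parseFrom(bytes);
    System.out.println(parsed.getDataset());
  }
}
```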

Methods

equals(Object obj)

public boolean equals(Object obj)
Parameter
Name Description
obj Object
Returns
Type Description
boolean
Overrides

getDataset()

public String getDataset()

Required. The BigQuery dataset in the format "projects/projectId/datasets/datasetId" to which the snapshot result should be exported. If this dataset does not exist, the export call returns an INVALID_ARGUMENT error. Setting the contentType for exportAssets determines the schema of the BigQuery table. Setting separateTablesPerAssetType to TRUE also influences the schema.

string dataset = 1 [(.google.api.field_behavior) = REQUIRED];

Returns
Type Description
String

The dataset.

getDatasetBytes()

public ByteString getDatasetBytes()

Required. The BigQuery dataset in the format "projects/projectId/datasets/datasetId" to which the snapshot result should be exported. If this dataset does not exist, the export call returns an INVALID_ARGUMENT error. Setting the contentType for exportAssets determines the schema of the BigQuery table. Setting separateTablesPerAssetType to TRUE also influences the schema.

string dataset = 1 [(.google.api.field_behavior) = REQUIRED];

Returns
Type Description
ByteString

The bytes for dataset.

getDefaultInstanceForType()

public BigQueryDestination getDefaultInstanceForType()
Returns
Type Description
BigQueryDestination

getForce()

public boolean getForce()

If the destination table already exists and this flag is TRUE, the table will be overwritten by the contents of the assets snapshot. If the flag is FALSE or unset and the destination table already exists, the export call returns an INVALID_ARGUMENT error.

bool force = 3;

Returns
Type Description
boolean

The force.
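
A small sketch of how the flag reads back from the message (the server-side overwrite-versus-error behavior itself is described above):

```java
import com.google.cloud.asset.v1.BigQueryDestination;

public class ForceExample {
  public static void main(String[] args) {
    // force is a plain bool field; when unset it reads back as false, which
    // for an existing destination table means the export call fails with
    // INVALID_ARGUMENT rather than overwriting the table.
    BigQueryDestination defaults = BigQueryDestination.getDefaultInstance();
    System.out.println(defaults.getForce()); // false

    BigQueryDestination overwrite =
        BigQueryDestination.newBuilder().setForce(true).build();
    System.out.println(overwrite.getForce()); // true
  }
}
```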

getParserForType()

public Parser<BigQueryDestination> getParserForType()
Returns
Type Description
Parser<BigQueryDestination>
Overrides

getPartitionSpec()

public PartitionSpec getPartitionSpec()

[partition_spec] determines whether to export to partitioned table(s) and how to partition the data.

If [partition_spec] is unset or [partition_spec.partition_key] is unset or PARTITION_KEY_UNSPECIFIED, the snapshot results will be exported to non-partitioned table(s). [force] will decide whether to overwrite existing table(s).

If [partition_spec] is specified, then first, the snapshot results will be written to partitioned table(s) with two additional timestamp columns, readTime and requestTime, one of which will be the partition key. Second, if any destination table already exists, the export will first try to update the existing table's schema as necessary by appending additional columns. Then, if [force] is TRUE, the corresponding partition will be overwritten by the snapshot results (data in different partitions will remain intact); if [force] is unset or FALSE, the data will be appended. An error will be returned if the schema update or data append fails.

.google.cloud.asset.v1.PartitionSpec partition_spec = 4;

Returns
Type Description
PartitionSpec

The partitionSpec.
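
As a hedged sketch of the accessor behavior described above, the snippet below contrasts an unset partition_spec with one that partitions by the request timestamp; field values are illustrative only.

```java
import com.google.cloud.asset.v1.BigQueryDestination;
import com.google.cloud.asset.v1.PartitionSpec;

public class PartitionSpecExample {
  public static void main(String[] args) {
    // With the message field unset, hasPartitionSpec() is false and the getter
    // returns the default PartitionSpec instance.
    BigQueryDestination unpartitioned = BigQueryDestination.getDefaultInstance();
    System.out.println(unpartitioned.hasPartitionSpec()); // false

    // Partition the exported snapshot by the request timestamp.
    BigQueryDestination partitioned =
        BigQueryDestination.newBuilder()
            .setPartitionSpec(
                PartitionSpec.newBuilder()
                    .setPartitionKey(PartitionSpec.PartitionKey.REQUEST_TIME)
                    .build())
            .setForce(true) // overwrite only the matching partition
            .build();
    System.out.println(partitioned.getPartitionSpec().getPartitionKey());
  }
}
```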

getPartitionSpecOrBuilder()

public PartitionSpecOrBuilder getPartitionSpecOrBuilder()

[partition_spec] determines whether to export to partitioned table(s) and how to partition the data.

If [partition_spec] is unset or [partition_spec.partition_key] is unset or PARTITION_KEY_UNSPECIFIED, the snapshot results will be exported to non-partitioned table(s). [force] will decide whether to overwrite existing table(s).

If [partition_spec] is specified, then first, the snapshot results will be written to partitioned table(s) with two additional timestamp columns, readTime and requestTime, one of which will be the partition key. Second, if any destination table already exists, the export will first try to update the existing table's schema as necessary by appending additional columns. Then, if [force] is TRUE, the corresponding partition will be overwritten by the snapshot results (data in different partitions will remain intact); if [force] is unset or FALSE, the data will be appended. An error will be returned if the schema update or data append fails.

.google.cloud.asset.v1.PartitionSpec partition_spec = 4;

Returns
Type Description
PartitionSpecOrBuilder

getSeparateTablesPerAssetType()

public boolean getSeparateTablesPerAssetType()

If this flag is TRUE, the snapshot results will be written to one or more tables, each of which contains results of one asset type. The [force] and [partition_spec] fields will apply to each of them.

Field [table] will be concatenated with "_" and the asset type names (see https://cloud.google.com/asset-inventory/docs/supported-asset-types for supported asset types) to construct per-asset-type table names, in which all non-alphanumeric characters like "." and "/" will be substituted by "_". Example: if field [table] is "mytable" and the snapshot results contain "storage.googleapis.com/Bucket" assets, the corresponding table name will be "mytable_storage_googleapis_com_Bucket". If any of these tables does not exist, a new table with the concatenated name will be created.

When [content_type] in the ExportAssetsRequest is RESOURCE, the schema of each table will include RECORD-type columns mapped to the nested fields in the Asset.resource.data field of that asset type (up to the 15 nested levels that BigQuery supports: https://cloud.google.com/bigquery/docs/nested-repeated#limitations). Fields nested more than 15 levels deep will be stored as JSON-format strings in a child column of their parent RECORD column.

If an error occurs when exporting to any table, the whole export call will return an error, but the export results that have already succeeded will persist. Example: if exporting to table_type_A succeeds and exporting to table_type_B fails during one export call, the results in table_type_A will persist, and there will not be partial results persisting in any table.

bool separate_tables_per_asset_type = 5;

Returns
Type Description
boolean

The separateTablesPerAssetType.
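
The table-name substitution rule described above can be illustrated with a purely local sketch; this is not an API call, just a hypothetical helper that mirrors the documented naming convention.

```java
public class PerAssetTypeTableName {
  // Mirrors the documented rule: the base table name is concatenated with "_"
  // and the asset type, with non-alphanumeric characters replaced by "_".
  static String perAssetTypeTable(String table, String assetType) {
    return table + "_" + assetType.replaceAll("[^A-Za-z0-9]", "_");
  }

  public static void main(String[] args) {
    // Prints "mytable_storage_googleapis_com_Bucket", matching the example above.
    System.out.println(perAssetTypeTable("mytable", "storage.googleapis.com/Bucket"));
  }
}
```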

getSerializedSize()

public int getSerializedSize()
Returns
Type Description
int
Overrides

getTable()

public String getTable()

Required. The BigQuery table to which the snapshot result should be written. If this table does not exist, a new table with the given name will be created.

string table = 2 [(.google.api.field_behavior) = REQUIRED];

Returns
Type Description
String

The table.

getTableBytes()

public ByteString getTableBytes()

Required. The BigQuery table to which the snapshot result should be written. If this table does not exist, a new table with the given name will be created.

string table = 2 [(.google.api.field_behavior) = REQUIRED];

Returns
Type Description
ByteString

The bytes for table.

hasPartitionSpec()

public boolean hasPartitionSpec()

[partition_spec] determines whether to export to partitioned table(s) and how to partition the data.

If [partition_spec] is unset or [partition_spec.partition_key] is unset or PARTITION_KEY_UNSPECIFIED, the snapshot results will be exported to non-partitioned table(s). [force] will decide whether to overwrite existing table(s).

If [partition_spec] is specified, then first, the snapshot results will be written to partitioned table(s) with two additional timestamp columns, readTime and requestTime, one of which will be the partition key. Second, if any destination table already exists, the export will first try to update the existing table's schema as necessary by appending additional columns. Then, if [force] is TRUE, the corresponding partition will be overwritten by the snapshot results (data in different partitions will remain intact); if [force] is unset or FALSE, the data will be appended. An error will be returned if the schema update or data append fails.

.google.cloud.asset.v1.PartitionSpec partition_spec = 4;

Returns
Type Description
boolean

Whether the partitionSpec field is set.

hashCode()

public int hashCode()
Returns
Type Description
int
Overrides

internalGetFieldAccessorTable()

protected GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
Returns
Type Description
FieldAccessorTable
Overrides

isInitialized()

public final boolean isInitialized()
Returns
Type Description
boolean
Overrides

newBuilderForType()

public BigQueryDestination.Builder newBuilderForType()
Returns
Type Description
BigQueryDestination.Builder

newBuilderForType(GeneratedMessageV3.BuilderParent parent)

protected BigQueryDestination.Builder newBuilderForType(GeneratedMessageV3.BuilderParent parent)
Parameter
Name Description
parent BuilderParent
Returns
Type Description
BigQueryDestination.Builder
Overrides

newInstance(GeneratedMessageV3.UnusedPrivateParameter unused)

protected Object newInstance(GeneratedMessageV3.UnusedPrivateParameter unused)
Parameter
Name Description
unused UnusedPrivateParameter
Returns
Type Description
Object
Overrides

toBuilder()

public BigQueryDestination.Builder toBuilder()
Returns
Type Description
BigQueryDestination.Builder
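
toBuilder() is the usual way to derive a modified copy of an immutable message; a brief sketch with placeholder field values:

```java
import com.google.cloud.asset.v1.BigQueryDestination;

public class ToBuilderExample {
  public static void main(String[] args) {
    BigQueryDestination base =
        BigQueryDestination.newBuilder()
            .setDataset("projects/my-project/datasets/my_dataset") // placeholder
            .setTable("my_table")                                  // placeholder
            .build();

    // Messages are immutable; toBuilder() copies the fields into a new Builder
    // so a single field can be changed without rebuilding everything.
    BigQueryDestination forced = base.toBuilder().setForce(true).build();

    System.out.println(base.getForce());   // false
    System.out.println(forced.getForce()); // true
  }
}
```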

writeTo(CodedOutputStream output)

public void writeTo(CodedOutputStream output)
Parameter
Name Description
output CodedOutputStream
Overrides
Exceptions
Type Description
IOException