Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00
feat: Upgrade rust to nightly-2022-07-14 (#217)
* feat: upgrade rust to nightly-2022-07-14
* style: Fix some clippy warnings
* style: clippy fix
* style: fix clippy
* style: Fix clippy
  Some PartialEq warnings have been worked around using cfg_attr(test)
* feat: Implement Eq and PartialEq for PrimitiveType
* chore: Remove unnecessary allow
* chore: Remove usage of cfg_attr for PartialEq
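Most of the diff below follows two patterns prompted by the newer clippy in this toolchain: deriving `Eq` wherever `PartialEq` is already derived (clippy's `derive_partial_eq_without_eq`), and replacing the derived `PartialEq` on the generic `PrimitiveType<T>` with a manual implementation plus an explicit `Eq`. Below is a minimal, self-contained sketch of both patterns; `EntryKind` and `Marker<T>` are illustrative stand-ins, not GreptimeDB code.

use std::any::TypeId;
use std::marker::PhantomData;

// Pattern 1: where `PartialEq` is derived and equality is total, the newer
// clippy suggests deriving `Eq` as well.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum EntryKind {
    Catalog,
    Schema,
    Table,
}

// Pattern 2: a generic marker type. Deriving `PartialEq` would add an
// unwanted `T: PartialEq` bound, so equality is implemented manually by
// comparing the type parameters' `TypeId`s, and `Eq` is a plain marker impl.
// `Marker<T>` is a hypothetical stand-in for `PrimitiveType<T>`.
#[derive(Debug, Default, Clone)]
struct Marker<T: 'static> {
    _phantom: PhantomData<T>,
}

impl<T: 'static, U: 'static> PartialEq<Marker<U>> for Marker<T> {
    fn eq(&self, _other: &Marker<U>) -> bool {
        // Two markers compare equal exactly when they carry the same type parameter.
        TypeId::of::<T>() == TypeId::of::<U>()
    }
}

impl<T: 'static> Eq for Marker<T> {}

fn main() {
    assert_eq!(EntryKind::Catalog, EntryKind::Catalog);
    assert_eq!(Marker::<i32>::default(), Marker::<i32>::default());
    assert_ne!(Marker::<i32>::default(), Marker::<f64>::default());
    println!("both equality patterns hold");
}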
@@ -1 +1 @@
-nightly-2022-04-03
+nightly-2022-07-14
@@ -1,3 +1,4 @@
+#![allow(clippy::all)]
 tonic::include_proto!("greptime.v1");
 
 pub mod codec {
@@ -26,7 +26,8 @@ impl MemoryCatalogList {
         catalog: Arc<dyn CatalogProvider>,
     ) -> Option<CatalogProviderRef> {
         let mut catalogs = self.catalogs.write().unwrap();
-        match catalogs.entry(name) {
+        let entry = catalogs.entry(name);
+        match entry {
             Entry::Occupied(v) => Some(v.get().clone()),
             Entry::Vacant(v) => {
                 v.insert(catalog);
@@ -257,7 +257,7 @@ pub fn decode_system_catalog(
     }
 }
 
-#[derive(Debug, Copy, Clone, PartialEq)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
 pub enum EntryType {
     Catalog = 1,
     Schema = 2,
@@ -280,25 +280,25 @@ impl TryFrom<u8> for EntryType {
     }
 }
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq)]
 pub enum Entry {
     Catalog(CatalogEntry),
     Schema(SchemaEntry),
     Table(TableEntry),
 }
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct CatalogEntry {
     pub catalog_name: String,
 }
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct SchemaEntry {
     pub catalog_name: String,
     pub schema_name: String,
 }
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct TableEntry {
     pub catalog_name: String,
     pub schema_name: String,
@@ -306,7 +306,7 @@ pub struct TableEntry {
     pub table_id: TableId,
 }
 
-#[derive(Debug, Serialize, Deserialize, PartialEq)]
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub struct TableEntryValue {
     pub table_id: TableId,
 }
@@ -1,7 +1,7 @@
 use std::fmt;
 
 /// Common status code for public API.
-#[derive(Debug, Clone, Copy, PartialEq)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum StatusCode {
     // ====== Begin of common status code ==============
     /// Success.
@@ -1,7 +1,7 @@
 use crate::data_type::ConcreteDataType;
 
 /// Unique identifier for logical data type.
-#[derive(Clone, Copy, Debug, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum LogicalTypeId {
     Null,
@@ -8,7 +8,7 @@ use crate::data_type::{DataType, DataTypeRef};
 use crate::type_id::LogicalTypeId;
 use crate::value::Value;
 
-#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct BinaryType;
 
 impl BinaryType {
@@ -7,7 +7,7 @@ use crate::data_type::{DataType, DataTypeRef};
 use crate::type_id::LogicalTypeId;
 use crate::value::Value;
 
-#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct BooleanType;
 
 impl BooleanType {
@@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize};
 use crate::data_type::DataType;
 use crate::prelude::{DataTypeRef, LogicalTypeId, Value};
 
-#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct DateType;
 
 impl DataType for DateType {
@@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize};
 use crate::data_type::{DataType, DataTypeRef};
 use crate::prelude::{LogicalTypeId, Value};
 
-#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct DateTimeType;
 
 const DATE_TIME_TYPE_NAME: &str = "DateTime";
@@ -7,7 +7,7 @@ use crate::data_type::{DataType, DataTypeRef};
 use crate::type_id::LogicalTypeId;
 use crate::value::Value;
 
-#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct NullType;
 
 impl NullType {
@@ -1,3 +1,4 @@
+use std::any::TypeId;
 use std::marker::PhantomData;
 
 use arrow::datatypes::DataType as ArrowDataType;
@@ -9,12 +10,20 @@ use crate::type_id::LogicalTypeId;
 use crate::types::primitive_traits::Primitive;
 use crate::value::Value;
 
-#[derive(Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Clone, Serialize, Deserialize)]
 pub struct PrimitiveType<T: Primitive> {
     #[serde(skip)]
     _phantom: PhantomData<T>,
 }
 
+impl<T: Primitive, U: Primitive> PartialEq<PrimitiveType<U>> for PrimitiveType<T> {
+    fn eq(&self, _other: &PrimitiveType<U>) -> bool {
+        TypeId::of::<T>() == TypeId::of::<U>()
+    }
+}
+
+impl<T: Primitive> Eq for PrimitiveType<T> {}
+
 /// Create a new [ConcreteDataType] from a primitive type.
 pub trait DataTypeBuilder {
     fn build_data_type() -> ConcreteDataType;
@@ -88,3 +97,25 @@ impl_numeric!(i32, Int32);
 impl_numeric!(i64, Int64);
 impl_numeric!(f32, Float32);
 impl_numeric!(f64, Float64);
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_eq() {
+        assert_eq!(UInt8Type::default(), UInt8Type::default());
+        assert_eq!(UInt16Type::default(), UInt16Type::default());
+        assert_eq!(UInt32Type::default(), UInt32Type::default());
+        assert_eq!(UInt64Type::default(), UInt64Type::default());
+        assert_eq!(Int8Type::default(), Int8Type::default());
+        assert_eq!(Int16Type::default(), Int16Type::default());
+        assert_eq!(Int32Type::default(), Int32Type::default());
+        assert_eq!(Int64Type::default(), Int64Type::default());
+        assert_eq!(Float32Type::default(), Float32Type::default());
+        assert_eq!(Float64Type::default(), Float64Type::default());
+
+        assert_ne!(Float32Type::default(), Float64Type::default());
+        assert_ne!(Float32Type::default(), Int32Type::default());
+    }
+}
@@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize};
 use crate::data_type::DataType;
 use crate::prelude::{DataTypeRef, LogicalTypeId, Value};
 
-#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct StringType;
 
 impl StringType {
@@ -63,7 +63,7 @@ impl Chunk {
     pub fn read(&self, dst: &mut [u8]) -> usize {
         let size = self.len().min(dst.len());
         let range = self.read_offset..(self.read_offset + size);
-        (&mut dst[0..size]).copy_from_slice(&self.data[range]);
+        dst[0..size].copy_from_slice(&self.data[range]);
         size
     }
@@ -18,7 +18,7 @@ const ENTRY_MIN_LEN: usize = HEADER_LENGTH + 4;
 // length + offset + namespace id + epoch
 const HEADER_LENGTH: usize = 4 + 8 + 8 + 8;
 
-#[derive(Debug, PartialEq, Clone)]
+#[derive(Debug, PartialEq, Eq, Clone)]
 pub struct EntryImpl {
     pub data: Vec<u8>,
     pub offset: Offset,
@@ -6,7 +6,7 @@ use store_api::logstore::entry::{Id, Offset};
 use crate::error::Result;
 use crate::fs::file_name::FileName;
 
-#[derive(Debug, Copy, Clone, PartialEq)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
 pub struct Location {
     pub file_name: FileName,
     pub offset: Offset,
@@ -3,7 +3,7 @@ use crate::ast::{ColumnDef, ObjectName, SqlOption, TableConstraint};
 /// Time index name, used in table constraints.
 pub const TIME_INDEX: &str = "__time_index";
 
-#[derive(Debug, PartialEq, Clone)]
+#[derive(Debug, PartialEq, Eq, Clone)]
 pub struct CreateTable {
     /// Create if not exists
     pub if_not_exists: bool,
@@ -3,7 +3,7 @@ use sqlparser::parser::ParserError;
 
 use crate::ast::{Expr, Value};
 
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Insert {
     // Can only be sqlparser::ast::Statement::Insert variant
     pub inner: Statement,
@@ -3,7 +3,7 @@ use sqlparser::ast::Query as SpQuery;
 use crate::error::Error;
 
 /// Query statement instance.
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Query {
     pub inner: SpQuery,
 }
@@ -1,7 +1,7 @@
 use crate::statements::show_kind::ShowKind;
 
 /// SQL structure for `SHOW DATABASES`.
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub struct SqlShowDatabase {
     pub kind: ShowKind,
 }
@@ -2,7 +2,7 @@ use sqlparser::ast::Expr;
 use sqlparser::ast::Ident;
 
 /// Show kind for SQL expressions like `SHOW DATABASE` or `SHOW TABLE`
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum ShowKind {
     All,
     Like(Ident),
@@ -7,7 +7,7 @@ use crate::statements::query::Query;
 use crate::statements::show_database::SqlShowDatabase;
 
 /// Tokens parsed by `DFParser` are converted into these values.
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Statement {
     // Databases.
     ShowDatabases(SqlShowDatabase),
@@ -41,7 +41,7 @@ impl TryFrom<Statement> for SpStatement {
 /// Comment hints from SQL.
 /// It'll be enabled when using `--comment` in mysql client.
 /// Eg: `SELECT * FROM system.number LIMIT 1; -- { ErrorCode 25 }`
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Hint {
     pub error_code: Option<u16>,
     pub comment: String,
@@ -4,8 +4,6 @@ use storage::write_batch::{codec, WriteBatch};
 
 use super::util::gen_new_batch_and_extras;
 
-tonic::include_proto!("greptime.storage.wal.v1");
-
 /*
 -------------------------------------
 decode |
@@ -4,8 +4,6 @@ use storage::write_batch::{codec, WriteBatch};
 
 use super::util::gen_new_batch_and_extras;
 
-tonic::include_proto!("greptime.storage.wal.v1");
-
 /*
 -------------------------------------
 encode |
@@ -4,8 +4,6 @@ use storage::write_batch::{codec, WriteBatch};
 
 use super::util::gen_new_batch_and_extras;
 
-tonic::include_proto!("greptime.storage.wal.v1");
-
 /*
 -------------------------------------
 encode & decode |
@@ -18,7 +18,7 @@ use crate::metadata::{ColumnFamilyMetadata, ColumnMetadata, VersionNumber};
 use crate::sst::FileMeta;
 
 /// Minimal data that could be used to persist and recover [RegionMetadata](crate::metadata::RegionMetadata).
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
 pub struct RawRegionMetadata {
     pub id: RegionId,
     pub name: String,
@@ -38,7 +38,7 @@ pub struct RawColumnsMetadata {
 }
 
 /// Minimal data that could be used to persist and recover [ColumnFamiliesMetadata](crate::metadata::ColumnFamiliesMetadata).
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct RawColumnFamiliesMetadata {
     pub column_families: Vec<ColumnFamilyMetadata>,
 }
@@ -48,12 +48,12 @@ pub struct RegionChange {
     pub metadata: RawRegionMetadata,
 }
 
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 pub struct RegionRemove {
     pub region_id: RegionId,
 }
 
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 pub struct RegionEdit {
     pub region_version: VersionNumber,
     pub flushed_sequence: SequenceNumber,
@@ -44,7 +44,7 @@ mod tests {
         manifest
             .update(RegionMetaActionList::with_action(RegionMetaAction::Change(
                 RegionChange {
-                    metadata: (&*region_meta).into(),
+                    metadata: region_meta.as_ref().into(),
                 },
             )))
             .await
@@ -76,7 +76,7 @@ impl Default for IterContext {
 }
 
 /// The ordering of the iterator output.
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq)]
 pub enum RowOrdering {
     /// The output rows are unordered.
     Unordered,
@@ -117,8 +117,8 @@ impl From<&RegionMetadata> for RawRegionMetadata {
         RawRegionMetadata {
             id: data.id,
             name: data.name.clone(),
-            columns: (&*data.columns).into(),
-            column_families: (&data.column_families).into(),
+            columns: RawColumnsMetadata::from(&*data.columns),
+            column_families: RawColumnFamiliesMetadata::from(&data.column_families),
             version: data.version,
         }
     }
@@ -274,7 +274,7 @@ impl From<RawColumnsMetadata> for ColumnsMetadata {
     }
 }
 
-#[derive(Clone, Debug, PartialEq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ColumnFamiliesMetadata {
     /// Map column family id to column family metadata.
     id_to_cfs: HashMap<ColumnFamilyId, ColumnFamilyMetadata>,
@@ -304,7 +304,7 @@ impl From<RawColumnFamiliesMetadata> for ColumnFamiliesMetadata {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub struct ColumnFamilyMetadata {
     /// Column family name.
     pub name: String,
@@ -7,7 +7,7 @@ use crate::error::Result;
 
 /// Storage internal representation of a batch of rows.
 // Now the structure of `Batch` is still unstable, all pub fields may be changed.
-#[derive(Debug, Default, PartialEq)]
+#[derive(Debug, Default, PartialEq, Eq)]
 pub struct Batch {
     /// Rows organized in columnar format.
     ///
@@ -102,7 +102,7 @@ impl<S: LogStore> RegionImpl<S> {
             .update(RegionMetaActionList::new(vec![
                 RegionMetaAction::Protocol(ProtocolAction::new()),
                 RegionMetaAction::Change(RegionChange {
-                    metadata: (&*metadata).into(),
+                    metadata: metadata.as_ref().into(),
                 }),
            ]))
             .await?;
@@ -322,7 +322,7 @@ struct RegionInner<S: LogStore> {
 impl<S: LogStore> RegionInner<S> {
     #[inline]
     fn version_control(&self) -> &VersionControl {
-        &*self.shared.version_control
+        self.shared.version_control.as_ref()
     }
 
     fn in_memory_metadata(&self) -> RegionMetaImpl {
@@ -198,7 +198,7 @@ async fn test_recover_region_manifets() {
     manifest
         .update(RegionMetaActionList::with_action(RegionMetaAction::Change(
             RegionChange {
-                metadata: (&*region_meta).into(),
+                metadata: region_meta.as_ref().into(),
             },
         )))
         .await
@@ -51,7 +51,7 @@ impl Snapshot for SnapshotImpl {
             builder = builder.pick_memtables(mem_set);
         }
 
-        let reader = builder.pick_ssts(&**self.version.ssts())?.build().await?;
+        let reader = builder.pick_ssts(self.version.ssts())?.build().await?;
 
         Ok(ScanResponse { reader })
     }
@@ -158,7 +158,7 @@ impl FileHandleInner {
 }
 
 /// Immutable metadata of a sst file.
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 pub struct FileMeta {
     pub file_name: String,
     /// SST level of the file.
@@ -65,7 +65,7 @@ impl<'a> ParquetWriter<'a> {
         let writer = object.writer(0).await.context(error::FlushIoSnafu)?;
 
         // now all physical types use plain encoding, maybe let caller to choose encoding for each type.
-        let encodings = get_encoding_for_schema(&*schema, |_| Encoding::Plain);
+        let encodings = get_encoding_for_schema(schema, |_| Encoding::Plain);
 
         let mut sink = FileSink::try_new(
             writer,
@@ -576,13 +576,10 @@ pub mod codec {
 
         let mut chunks = Vec::with_capacity(self.mutation_extras.len());
 
-        for state_opt in stream_states {
-            match state_opt {
-                Some(s) => match s {
-                    StreamState::Some(chunk) => chunks.push(chunk),
-                    StreamState::Waiting => return Err(WriteBatchError::StreamWaiting),
-                },
-                None => (),
-            }
-        }
+        for state_opt in stream_states.into_iter().flatten() {
+            match state_opt {
+                StreamState::Some(chunk) => chunks.push(chunk),
+                StreamState::Waiting => return Err(WriteBatchError::StreamWaiting),
+            }
+        }
@@ -19,13 +19,13 @@ pub fn supported_protocol_version() -> (ProtocolVersion, ProtocolVersion) {
 /// Protocol action that used to block older clients from reading or writing the log when backwards
 /// incompatible changes are made to the protocol. clients should be tolerant of messages and
 /// fields that they do not understand.
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
 pub struct ProtocolAction {
     pub min_reader_version: ProtocolVersion,
     pub min_writer_version: ProtocolVersion,
 }
 
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 pub struct VersionHeader {
     pub prev_version: ManifestVersion,
 }
@@ -18,7 +18,7 @@ pub struct TableChange {
     pub table_info: TableInfo,
 }
 
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 pub struct TableRemove {
     pub table_ident: TableIdent,
     pub table_name: String,
@@ -11,7 +11,7 @@ pub type TableVersion = u64;
 
 /// Indicates whether and how a filter expression can be handled by a
 /// Table for table scans.
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
 pub enum FilterPushDownType {
     /// The expression cannot be used by the provider.
     Unsupported,
@@ -27,7 +27,7 @@ pub enum FilterPushDownType {
 }
 
 /// Indicates the type of this table for metadata/catalog purposes.
-#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
+#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
 pub enum TableType {
     /// An ordinary physical table.
     Base,