Mirror of https://github.com/GreptimeTeam/greptimedb.git, synced 2025-12-22 22:20:02 +00:00
ci: upgrade spell checker to 1.13.10 (#1045)
* ci: upgrade spell checker to 1.13.10
* fix: fix existing typos
* chore: use taplo to format typos.toml
* chore: add fmt-toml rule to format TOML files

Signed-off-by: Zhizhen He <hezhizhen.yi@gmail.com>
.github/workflows/develop.yml (vendored): 4 changes
@@ -31,8 +31,8 @@ jobs:
     name: Spell Check with Typos
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - uses: crate-ci/typos@v1.0.4
+      - uses: actions/checkout@v3
+      - uses: crate-ci/typos@v1.13.10

   check:
     name: Check
@@ -54,7 +54,7 @@ arrow-schema = { version = "29.0", features = ["serde"] }
 async-stream = "0.3"
 async-trait = "0.1"
 chrono = { version = "0.4", features = ["serde"] }
-# TODO(LFC): Use released Datafusion when it officially dpendent on Arrow 29.0
+# TODO(LFC): Use released Datafusion when it officially dependent on Arrow 29.0
 datafusion = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
 datafusion-common = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
 datafusion-expr = { git = "https://github.com/apache/arrow-datafusion.git", rev = "4917235a398ae20145c87d20984e6367dc1a0c1e" }
Makefile: 4 changes
@@ -19,6 +19,10 @@ clean: ## Clean the project.
 fmt: ## Format all the Rust code.
 	cargo fmt --all

+.PHONY: fmt-toml
+fmt-toml: ## Format all TOML files.
+	taplo format --check --option "indent_string= "
+
 .PHONY: docker-image
 docker-image: ## Build docker image.
 	docker build --network host -f docker/Dockerfile -t ${IMAGE_REGISTRY}:${IMAGE_TAG} .
@@ -149,10 +149,10 @@ inputs:
 - title: 'Series Normalize: \noffset = 0'
 operator: prom
 inputs:
-- title: 'Filter: \ntimetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
+- title: 'Filter: \ntimestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
 operator: filter
 inputs:
-- title: 'Table Scan: \ntable = request_duration, timetamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
+- title: 'Table Scan: \ntable = request_duration, timestamp > 2022-12-20T10:00:00 && timestamp < 2022-12-21T10:00:00'
 operator: scan -->
@@ -107,7 +107,7 @@ impl ParquetWriter {
         let mut end_loop = true;
         // TODO(hl & jiachun): Since OpenDAL's writer is async and ArrowWriter requires a `std::io::Write`,
         // here we use a Vec<u8> to buffer all parquet bytes in memory and write to object store
-        // at a time. Maybe we should find a better way to brige ArrowWriter and OpenDAL's object.
+        // at a time. Maybe we should find a better way to bridge ArrowWriter and OpenDAL's object.
         while let Some(batch) = self
             .stream
             .try_next()
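Note: the TODO above describes the current workaround, since ArrowWriter needs a synchronous std::io::Write target, all parquet bytes are first collected in a Vec<u8> and then handed to the object store in one call. A minimal standalone sketch of that buffering pattern (crate paths follow recent arrow/parquet releases; the object-store call in the trailing comment is an assumed OpenDAL-style API, not this project's actual method):

use std::sync::Arc;

use arrow_array::RecordBatch;
use arrow_schema::Schema;
use parquet::arrow::ArrowWriter;
use parquet::errors::ParquetError;

// Sketch of the buffering workaround: encode every batch into an in-memory
// buffer, then let the caller upload the finished buffer to object storage.
fn encode_batches(schema: Arc<Schema>, batches: &[RecordBatch]) -> Result<Vec<u8>, ParquetError> {
    let mut buf = Vec::new(); // Vec<u8> implements std::io::Write
    let mut writer = ArrowWriter::try_new(&mut buf, schema, None)?;
    for batch in batches {
        writer.write(batch)?;
    }
    writer.close()?; // finishes the parquet footer
    Ok(buf)
    // The caller would then do something like `object.write(&buf).await`
    // (hypothetical OpenDAL-style call) to write the whole file at once.
}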
@@ -35,7 +35,7 @@ use crate::selector::load_based::LoadBasedSelector;
 use crate::selector::SelectorType;
 use crate::service::admin;
 use crate::service::store::etcd::EtcdStore;
-use crate::service::store::kv::ResetableKvStoreRef;
+use crate::service::store::kv::ResettableKvStoreRef;
 use crate::service::store::memory::MemStore;
 use crate::{error, Result};
@@ -90,7 +90,7 @@ pub async fn make_meta_srv(opts: MetaSrvOptions) -> Result<MetaSrv> {
         )
     };

-    let in_memory = Arc::new(MemStore::default()) as ResetableKvStoreRef;
+    let in_memory = Arc::new(MemStore::default()) as ResettableKvStoreRef;

     let meta_peer_client = MetaPeerClientBuilder::default()
         .election(election.clone())
@@ -28,13 +28,13 @@ use crate::error::{match_for_io_error, Result};
 use crate::keys::{StatKey, StatValue, DN_STAT_PREFIX};
 use crate::metasrv::ElectionRef;
 use crate::service::store::ext::KvStoreExt;
-use crate::service::store::kv::ResetableKvStoreRef;
+use crate::service::store::kv::ResettableKvStoreRef;
 use crate::{error, util};

 #[derive(Builder, Clone)]
 pub struct MetaPeerClient {
     election: Option<ElectionRef>,
-    in_memory: ResetableKvStoreRef,
+    in_memory: ResettableKvStoreRef,
     #[builder(default = "ChannelManager::default()")]
     channel_manager: ChannelManager,
     #[builder(default = "3")]
@@ -27,7 +27,7 @@ use crate::handler::HeartbeatHandlerGroup;
 use crate::lock::DistLockRef;
 use crate::selector::{Selector, SelectorType};
 use crate::sequence::SequenceRef;
-use crate::service::store::kv::{KvStoreRef, ResetableKvStoreRef};
+use crate::service::store::kv::{KvStoreRef, ResettableKvStoreRef};

 pub const TABLE_ID_SEQ: &str = "table_id";
@@ -59,7 +59,7 @@ impl Default for MetaSrvOptions {
 pub struct Context {
     pub datanode_lease_secs: i64,
     pub server_addr: String,
-    pub in_memory: ResetableKvStoreRef,
+    pub in_memory: ResettableKvStoreRef,
     pub kv_store: KvStoreRef,
     pub election: Option<ElectionRef>,
     pub skip_all: Arc<AtomicBool>,
@@ -93,7 +93,7 @@ pub struct MetaSrv {
     options: MetaSrvOptions,
     // It is only valid at the leader node and is used to temporarily
     // store some data that will not be persisted.
-    in_memory: ResetableKvStoreRef,
+    in_memory: ResettableKvStoreRef,
     kv_store: KvStoreRef,
     table_id_sequence: SequenceRef,
     selector: SelectorRef,
@@ -142,7 +142,7 @@ impl MetaSrv {
     }

     #[inline]
-    pub fn in_memory(&self) -> ResetableKvStoreRef {
+    pub fn in_memory(&self) -> ResettableKvStoreRef {
         self.in_memory.clone()
     }
@@ -24,14 +24,14 @@ use crate::lock::DistLockRef;
 use crate::metasrv::{ElectionRef, MetaSrv, MetaSrvOptions, SelectorRef, TABLE_ID_SEQ};
 use crate::selector::lease_based::LeaseBasedSelector;
 use crate::sequence::Sequence;
-use crate::service::store::kv::{KvStoreRef, ResetableKvStoreRef};
+use crate::service::store::kv::{KvStoreRef, ResettableKvStoreRef};
 use crate::service::store::memory::MemStore;

 // TODO(fys): try use derive_builder macro
 pub struct MetaSrvBuilder {
     options: Option<MetaSrvOptions>,
     kv_store: Option<KvStoreRef>,
-    in_memory: Option<ResetableKvStoreRef>,
+    in_memory: Option<ResettableKvStoreRef>,
     selector: Option<SelectorRef>,
     handler_group: Option<HeartbeatHandlerGroup>,
     election: Option<ElectionRef>,
@@ -63,7 +63,7 @@ impl MetaSrvBuilder {
         self
     }

-    pub fn in_memory(mut self, in_memory: ResetableKvStoreRef) -> Self {
+    pub fn in_memory(mut self, in_memory: ResettableKvStoreRef) -> Self {
         self.in_memory = Some(in_memory);
         self
     }
@@ -23,7 +23,7 @@ use api::v1::meta::{
 use crate::error::Result;

 pub type KvStoreRef = Arc<dyn KvStore>;
-pub type ResetableKvStoreRef = Arc<dyn ResetableKvStore>;
+pub type ResettableKvStoreRef = Arc<dyn ResettableKvStore>;

 #[async_trait::async_trait]
 pub trait KvStore: Send + Sync {
@@ -40,6 +40,6 @@ pub trait KvStore: Send + Sync {
     async fn move_value(&self, req: MoveValueRequest) -> Result<MoveValueResponse>;
 }

-pub trait ResetableKvStore: KvStore {
+pub trait ResettableKvStore: KvStore {
     fn reset(&self);
 }
@@ -24,7 +24,7 @@ use api::v1::meta::{
 use parking_lot::RwLock;

 use crate::error::Result;
-use crate::service::store::kv::{KvStore, ResetableKvStore};
+use crate::service::store::kv::{KvStore, ResettableKvStore};

 pub struct MemStore {
     inner: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
@@ -44,7 +44,7 @@ impl MemStore {
     }
 }

-impl ResetableKvStore for MemStore {
+impl ResettableKvStore for MemStore {
     fn reset(&self) {
         self.inner.write().clear();
     }
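Note: taken together, the renamed trait is small, ResettableKvStore only adds a reset() hook on top of KvStore, and MemStore clears its map when reset. A minimal self-contained sketch of the pattern (the real trait also carries the async KvStore methods, omitted here; the "on losing leadership" trigger is an assumption based on the MetaSrv comment about leader-only validity):

use std::collections::BTreeMap;
use std::sync::Arc;

use parking_lot::RwLock;

// Simplified stand-in for the real trait: just the reset hook.
trait ResettableKvStore: Send + Sync {
    fn reset(&self);
}

struct MemStore {
    inner: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
}

impl ResettableKvStore for MemStore {
    fn reset(&self) {
        // Drop everything; this store only holds transient, non-persisted data.
        self.inner.write().clear();
    }
}

fn main() {
    let in_memory: Arc<dyn ResettableKvStore> = Arc::new(MemStore {
        inner: RwLock::new(BTreeMap::new()),
    });
    // e.g. invoked when the meta server is no longer the leader (assumed trigger)
    in_memory.reset();
}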
@@ -50,7 +50,7 @@ pub type RangeTuple = (u32, u32);
 /// └───────────────┼───────────────┘
 /// ```
 ///
-/// Then the [DictionaryArray] can be expanded to serveral ranges like this:
+/// Then the [DictionaryArray] can be expanded to several ranges like this:
 ///
 /// ```text
 /// Keys
@@ -96,7 +96,7 @@ impl RangeArray {
         unsafe { Ok(Self::from_ranges_unchecked(values, ranges)) }
     }

-    /// Construct [RangeArray] from given range without checking its validaty.
+    /// Construct [RangeArray] from given range without checking its validity.
     ///
     /// # Safety
     ///
@@ -194,7 +194,7 @@ impl RangeArray {
         )
     }

-    /// Build datatype of wrappered [RangeArray] on given value type.
+    /// Build datatype of wrapped [RangeArray] on given value type.
     pub fn convert_data_type(value_type: DataType) -> DataType {
         DataType::Dictionary(Box::new(Self::key_type()), Box::new(value_type))
     }
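Note: as the doc comments above describe, a RangeArray is physically a DictionaryArray whose value side holds the underlying data and whose key side encodes (offset, length) ranges, so its Arrow datatype is a Dictionary type. A minimal sketch of that idea (Int64 keys are assumed purely for illustration; the real RangeArray::key_type() may differ):

use arrow_schema::DataType;

// Hypothetical mirror of RangeArray::convert_data_type: wrap a value type
// in a dictionary datatype whose keys will encode range tuples.
fn range_array_data_type(value_type: DataType) -> DataType {
    // Int64 keys are an assumption for this sketch.
    DataType::Dictionary(Box::new(DataType::Int64), Box::new(value_type))
}

fn main() {
    let dt = range_array_data_type(DataType::Utf8);
    println!("{dt:?}"); // Dictionary(Int64, Utf8)
}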
@@ -364,7 +364,7 @@ pub struct PyQueryEngine {

 #[pyclass]
 impl PyQueryEngine {
-    // TODO(discord9): find a better way to call sql query api, now we don't if we are in async contex or not
+    // TODO(discord9): find a better way to call sql query api, now we don't if we are in async context or not
     /// return sql query results in List[List[PyVector]], or List[usize] for AffectedRows number if no recordbatches is returned
     #[pymethod]
     fn sql(&self, s: String, vm: &VirtualMachine) -> PyResult<PyListRef> {
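Note: the TODO above is about not knowing whether the caller is already inside an async runtime when the Python-facing sql method needs to run an async query. A minimal sketch of one common way to detect this, assuming a tokio runtime (which may not be what this code ultimately uses):

use tokio::runtime::Handle;

// Returns true if we are already inside a tokio runtime. In that case,
// blocking on a future directly (e.g. with Runtime::block_on) would panic,
// and the query should instead be handed to the existing runtime.
fn in_async_context() -> bool {
    Handle::try_current().is_ok()
}

fn main() {
    // Outside any runtime this prints `false`.
    println!("in async context: {}", in_async_context());
}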
@@ -101,7 +101,7 @@ impl<'a> ParquetWriter<'a> {

         // TODO(hl): Since OpenDAL's writer is async and ArrowWriter requires a `std::io::Write`,
         // here we use a Vec<u8> to buffer all parquet bytes in memory and write to object store
-        // at a time. Maybe we should find a better way to brige ArrowWriter and OpenDAL's object.
+        // at a time. Maybe we should find a better way to bridge ArrowWriter and OpenDAL's object.
         let mut buf = vec![];
         let mut arrow_writer = ArrowWriter::try_new(&mut buf, schema.clone(), Some(writer_props))
             .context(WriteParquetSnafu)?;
@@ -1,2 +1,5 @@
 [default.extend-words]
 ue = "ue"
+datas = "datas"
+[files]
+extend-exclude = ["corrupted"]