Add server client
This commit is contained in:
104
crates/pile-dataset/src/serve/items.rs
Normal file
104
crates/pile-dataset/src/serve/items.rs
Normal file
@@ -0,0 +1,104 @@
|
||||
use axum::{
|
||||
Json,
|
||||
extract::{Query, State},
|
||||
http::StatusCode,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
use tracing::debug;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::Datasets;
|
||||
|
||||
/// Query parameters accepted by the `/items` endpoint.
#[derive(Deserialize, ToSchema)]
pub struct ItemsQuery {
    /// Number of items to skip before the first returned item (defaults to 0).
    #[serde(default)]
    offset: usize,
    /// Maximum number of items to return; defaults to 100 and is capped
    /// at 1000 by the handler.
    #[serde(default = "default_limit")]
    limit: usize,
}
|
||||
|
||||
/// Fallback page size used when the `limit` query parameter is absent.
fn default_limit() -> usize {
    100
}
|
||||
|
||||
/// JSON body returned by `/items`: one page of item references plus
/// pagination metadata.
#[derive(Serialize, ToSchema)]
pub struct ItemsResponse {
    /// The page of item references, in global `(source, key)` order.
    pub items: Vec<ItemRef>,
    /// Total number of items across all sources (not just this page).
    pub total: usize,
    /// Offset that was applied, echoed back from the request.
    pub offset: usize,
    /// Effective limit that was applied (the requested limit capped at 1000).
    pub limit: usize,
}
|
||||
|
||||
/// A `(source, key)` pair identifying a single item within a named source.
#[derive(Serialize, ToSchema)]
pub struct ItemRef {
    /// Label of the source dataset the item belongs to.
    pub source: String,
    /// Key of the item within that source.
    pub key: String,
}
|
||||
|
||||
/// List all items across all sources with consistent ordering, paginated by offset and limit
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/items",
|
||||
params(
|
||||
("offset" = usize, Query, description = "Number of items to skip"),
|
||||
("limit" = usize, Query, description = "Maximum number of items to return (max 1000)"),
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Paginated list of items", body = ItemsResponse),
|
||||
)
|
||||
)]
|
||||
pub async fn items_list(
|
||||
State(state): State<Arc<Datasets>>,
|
||||
Query(params): Query<ItemsQuery>,
|
||||
) -> Response {
|
||||
let limit = params.limit.min(1000);
|
||||
let offset = params.offset;
|
||||
|
||||
debug!(message = "Serving /items", offset, limit);
|
||||
|
||||
// Sort sources by label for a consistent global order: (source, key)
|
||||
let mut source_labels: Vec<_> = state.sources.keys().collect();
|
||||
source_labels.sort();
|
||||
|
||||
let mut items: Vec<ItemRef> = Vec::with_capacity(limit);
|
||||
let mut total = 0usize;
|
||||
let mut remaining_offset = offset;
|
||||
|
||||
for label in source_labels {
|
||||
let dataset = &state.sources[label];
|
||||
let source_len = dataset.len();
|
||||
|
||||
if remaining_offset >= source_len {
|
||||
// This entire source is before our window; skip it efficiently
|
||||
remaining_offset -= source_len;
|
||||
total += source_len;
|
||||
continue;
|
||||
}
|
||||
|
||||
let want = (limit - items.len()).min(source_len - remaining_offset);
|
||||
let source_str = label.as_str().to_owned();
|
||||
for item in dataset.iter_page(remaining_offset, want) {
|
||||
items.push(ItemRef {
|
||||
source: source_str.clone(),
|
||||
key: item.key().to_string(),
|
||||
});
|
||||
}
|
||||
remaining_offset = 0;
|
||||
total += source_len;
|
||||
}
|
||||
|
||||
debug!(message = "Served /items", offset, limit, total);
|
||||
|
||||
(
|
||||
StatusCode::OK,
|
||||
Json(ItemsResponse {
|
||||
items,
|
||||
total,
|
||||
offset,
|
||||
limit,
|
||||
}),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
@@ -17,11 +17,14 @@ pub use item::*;
|
||||
mod field;
|
||||
pub use field::*;
|
||||
|
||||
mod items;
|
||||
pub use items::*;
|
||||
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(
|
||||
tags(),
|
||||
paths(lookup, item_get, get_field),
|
||||
components(schemas(LookupRequest, LookupResponse, LookupResult, ItemQuery, FieldQuery))
|
||||
paths(lookup, item_get, get_field, items_list),
|
||||
components(schemas(LookupRequest, LookupResponse, LookupResult, ItemQuery, FieldQuery, ItemsQuery, ItemsResponse, ItemRef))
|
||||
)]
|
||||
pub(crate) struct Api;
|
||||
|
||||
@@ -37,6 +40,7 @@ impl Datasets {
|
||||
.route("/lookup", post(lookup))
|
||||
.route("/item", get(item_get))
|
||||
.route("/field", get(get_field))
|
||||
.route("/items", get(items_list))
|
||||
.with_state(self.clone());
|
||||
|
||||
if let Some(prefix) = prefix {
|
||||
|
||||
Reference in New Issue
Block a user