initial commit

This commit is contained in:
missing 2022-08-02 12:28:52 -05:00
commit 58fa65fb6b
13 changed files with 2944 additions and 0 deletions

1
.gitignore vendored Normal file
View file

@ -0,0 +1 @@
/target

1619
Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

26
Cargo.toml Normal file
View file

@ -0,0 +1,26 @@
[package]
name = "registry"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# `#[async_trait]` used by the axum `FromRequest` extractor impls.
async-trait = "0.1.56"
# Web framework (routing, extractors, responses).
axum = "0.5.13"
futures-core = "0.3.21"
# `StreamExt` for consuming sqlx row streams.
futures-util = "0.3.21"
# SHA-256 of uploaded `.crate` tarballs (checksum recorded in the index).
hmac-sha256 = "1.1.4"
# `OnceCell` caching the `version_feature[]` type OID (see src/db.rs).
once_cell = "1.13.0"
semver = { version = "1.0.12", features = ["serde"] }
serde = { version = "1.0.140", features = ["derive"] }
serde_json = "1.0.82"
# Async PostgreSQL driver.
sqlx = { version = "0.6.0", features = ["runtime-tokio-rustls", "postgres"] }
tokio = { version = "1.20.1", features = ["full"] }
# `ReaderStream` used to stream crate downloads.
tokio-util = { version = "0.7.3", features = ["io"] }
# Parses `registry.toml` at startup.
toml = "0.5.9"
tower = "0.4.13"
# HTTP request tracing middleware.
tower-http = { version = "0.3.4", features = ["trace"] }
tracing-subscriber = "0.3.15"
# No cargo features defined yet; section kept as a placeholder.
[features]

51
README.md Normal file
View file

@ -0,0 +1,51 @@
# warehouse
A ~~simple~~ cargo registry.
To run this, you need to:
- Create a `registry.toml` file in the current directory with the following contents:
```toml
working_dir = "/path/to/some/directory"
postgres_uri = "postgres://1.2.3.4/db_name"
listen_uri = "0.0.0.0:1234"
```
- Run a PostgreSQL database at the specified URI.
- Initialize a (not bare) git repository at `the_working_dir/index`
- Create an `index/config.json` file with the following contents:
```json
{
"dl": "http://your.website:1234/api/v1/crates",
"api": "http://your.website:1234"
}
```
HTTPS is currently broken for an unknown reason.
- Write the following to `index/.git/hooks/post-commit`:
```sh
#!/bin/sh
exec git update-server-info
```
- Commit the `config.json` to git.
- Run any webserver at `index/.git`. (another method of serving git would also work)
Note: you may need to set `net.git-fetch-with-cli = true` in your `~/.cargo/config.toml`
The URL of that webserver/git repository is the URL of the registry.
Users must add the following to their `~/.cargo/config.toml`:
```toml
[registries]
foobarbaz = { index = "http://the.index.url" }
```
Creating accounts is currently not managed by this code. You will have to run the following query to add a user:
```sql
INSERT INTO users (login, credential) VALUES ('username', 'account token')
```
You can then give out the token for someone else to use with `cargo login`.

60
src/create.sql Normal file
View file

@ -0,0 +1,60 @@
-- Registry schema; executed at startup by `db::init`. The CREATE TABLE
-- statements are idempotent, but CREATE TYPE has no IF NOT EXISTS clause,
-- so re-running this script errors once the types exist (db::init
-- deliberately ignores that failure).

-- Registered accounts. Rows are inserted manually (see README).
CREATE TABLE IF NOT EXISTS users (
    id int NOT NULL UNIQUE GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    login text NOT NULL UNIQUE,
    -- API token handed to `cargo login`; matched verbatim on each request.
    credential text NOT NULL UNIQUE,
    name text
);
-- One row per published crate name.
CREATE TABLE IF NOT EXISTS crates (
    id int NOT NULL UNIQUE GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    name text NOT NULL UNIQUE,
    publisher int NOT NULL REFERENCES users (id),
    -- Ids of users allowed to publish/yank/manage owners for this crate.
    owners int[] NOT NULL
);
-- One entry of a version's feature map: a feature name and what it enables.
CREATE TYPE version_feature AS (
    feature text,
    enables text[]
);
-- One row per published version of a crate.
CREATE TABLE IF NOT EXISTS versions (
    id int NOT NULL UNIQUE GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    vers text NOT NULL,
    -- Hex-encoded SHA-256 of the `.crate` tarball (64 hex digits).
    cksum char(64) NOT NULL,
    yanked boolean NOT NULL,
    links text,
    crate_id int NOT NULL REFERENCES crates (id),
    features version_feature[] NOT NULL,
    authors text[] NOT NULL,
    description text,
    documentation text,
    homepage text,
    readme text,
    readme_file text,
    keywords text[] NOT NULL,
    categories text[] NOT NULL,
    license text,
    license_file text,
    repository text,
    badges jsonb NOT NULL
);
CREATE TYPE dependency_kind AS ENUM (
    'dev',
    'build',
    'normal'
);
-- One row per dependency of a published version.
CREATE TABLE IF NOT EXISTS deps (
    id int NOT NULL UNIQUE GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    -- NOTE(review): this foreign key means a crate can only depend on
    -- crates already published to THIS registry — confirm that restriction
    -- is intended (the `registry` column below suggests external deps).
    name text NOT NULL REFERENCES crates (name),
    version_req text NOT NULL,
    optional boolean NOT NULL,
    default_features boolean NOT NULL,
    target text,
    kind dependency_kind NOT NULL,
    registry text,
    package text,
    features text[] NOT NULL,
    version_id int NOT NULL REFERENCES versions (id)
);

147
src/db.rs Normal file
View file

@ -0,0 +1,147 @@
use std::collections::HashMap;
use once_cell::sync::OnceCell;
use sqlx::{
error::BoxDynError,
postgres::{types::Oid, PgHasArrayType, PgTypeInfo},
query_as,
types::Json,
Decode, Encode, Executor, FromRow, Postgres, Type,
};
/// Stores a `u32` in a PostgreSQL `int` (i32) column by reinterpreting the
/// bit pattern — Postgres has no unsigned integer type.
#[derive(Clone, Copy, Default, Debug)]
#[repr(transparent)]
pub struct PgU32(pub u32);
impl PgU32 {
    // Bit-for-bit reinterpretation u32 -> i32; values >= 2^31 become negative.
    fn to_i32(self) -> i32 {
        i32::from_ne_bytes(self.0.to_ne_bytes())
    }
    // Inverse of `to_i32`: reinterpret the i32 bits as a u32.
    fn from_i32(v: i32) -> Self {
        Self(u32::from_ne_bytes(v.to_ne_bytes()))
    }
}
// Decode by delegating to i32 and reinterpreting.
impl<'r> Decode<'r, Postgres> for PgU32 {
    fn decode(
        value: <Postgres as sqlx::database::HasValueRef<'r>>::ValueRef,
    ) -> Result<Self, sqlx::error::BoxDynError> {
        i32::decode(value).map(Self::from_i32)
    }
}
// Encode by delegating to i32; all three methods forward so the wire
// representation is exactly that of an i32.
impl<'q> Encode<'q, Postgres> for PgU32 {
    fn encode_by_ref(
        &self,
        buf: &mut <Postgres as sqlx::database::HasArguments<'q>>::ArgumentBuffer,
    ) -> sqlx::encode::IsNull {
        self.to_i32().encode(buf)
    }
    fn produces(&self) -> Option<<Postgres as sqlx::Database>::TypeInfo> {
        self.to_i32().produces()
    }
    fn size_hint(&self) -> usize {
        self.to_i32().size_hint()
    }
}
// Report the same SQL type (and array type) as i32.
impl Type<Postgres> for PgU32 {
    fn type_info() -> <Postgres as sqlx::Database>::TypeInfo {
        i32::type_info()
    }
}
impl PgHasArrayType for PgU32 {
    fn array_type_info() -> PgTypeInfo {
        i32::array_type_info()
    }
}
/// Row of the `users` table.
#[derive(FromRow)]
pub struct DbUser {
    pub id: PgU32,
    pub login: String,
    /// Token compared against the `Authorization` header value.
    pub credential: String,
    pub name: Option<String>,
}
/// Row of the `crates` table.
#[derive(FromRow)]
pub struct DbCrate {
    pub id: PgU32,
    pub name: String,
    pub publisher: PgU32,
    /// Ids of users allowed to manage this crate.
    pub owners: Vec<PgU32>,
}
/// Row of the `versions` table; columns mirror `create.sql`.
#[derive(FromRow)]
pub struct DbVersion {
    pub id: PgU32,
    /// Semver string as submitted at publish time.
    pub vers: String,
    /// Hex-encoded SHA-256 of the `.crate` tarball.
    pub cksum: String,
    pub yanked: bool,
    pub links: Option<String>,
    pub crate_id: PgU32,
    pub features: Vec<DbVersionFeature>,
    pub authors: Vec<String>,
    pub description: Option<String>,
    pub documentation: Option<String>,
    pub homepage: Option<String>,
    pub readme: Option<String>,
    pub readme_file: Option<String>,
    pub keywords: Vec<String>,
    pub categories: Vec<String>,
    pub license: Option<String>,
    pub license_file: Option<String>,
    pub repository: Option<String>,
    /// `jsonb` column decoded through sqlx's `Json` wrapper.
    pub badges: Json<HashMap<String, HashMap<String, String>>>,
}
/// Row of the `deps` table.
#[derive(FromRow)]
pub struct DbDep {
    pub id: PgU32,
    pub name: String,
    pub version_req: String,
    pub optional: bool,
    pub default_features: bool,
    pub target: Option<String>,
    /// Textual form of the `dependency_kind` enum ("dev"/"build"/"normal").
    pub kind: String,
    pub registry: Option<String>,
    pub package: Option<String>,
    pub features: Vec<String>,
    pub version_id: PgU32,
}
/// Postgres composite type `version_feature`: one feature-map entry.
#[derive(Type)]
#[sqlx(type_name = "version_feature")]
pub struct DbVersionFeature {
    pub feature: String,
    pub enables: Vec<String>,
}
/// OID of the Postgres array type `version_feature[]`, looked up once at
/// startup; sqlx cannot discover array OIDs of custom composite types itself.
static VERSION_FEATURE_ARRAY_OID: OnceCell<Oid> = OnceCell::new();
/// Applies `create.sql` (best-effort) and caches the `version_feature[]`
/// array OID. Must run once, before any query involving
/// `Vec<DbVersionFeature>`; panics if called a second time.
pub async fn init<'c, E: Executor<'c, Database = Postgres> + Copy>(
    e: E,
) -> Result<(), BoxDynError> {
    // explicitly ignore the result, since it currently throws an error if the type already exists
    let _ = e.execute(include_str!("create.sql")).await;
    // `pg_type.typarray` is the OID of the corresponding array type.
    let (oid,): (Oid,) = query_as("SELECT typarray FROM pg_type WHERE typname = 'version_feature'")
        .fetch_one(e)
        .await?;
    VERSION_FEATURE_ARRAY_OID
        .set(oid)
        .expect("db::init called multiple times");
    Ok(())
}
impl PgHasArrayType for DbVersionFeature {
    // Panics if `init` has not run yet; `main` calls it before serving.
    fn array_type_info() -> PgTypeInfo {
        PgTypeInfo::with_oid(*VERSION_FEATURE_ARRAY_OID.get().unwrap())
    }
}

29
src/download.rs Normal file
View file

@ -0,0 +1,29 @@
use std::sync::Arc;
use axum::{
body::{BoxBody, StreamBody},
extract::Path,
http::StatusCode,
Extension,
};
use semver::Version;
use tokio::fs::File;
use tokio_util::io::ReaderStream;
use crate::State;
/// `GET /api/v1/crates/:crate_name/:version/download`
///
/// Streams the stored tarball from `{crate_dir}/{name}/{version}/crate.crate`.
/// Returns 404 both for unknown crates and for crate names that could
/// escape the storage directory.
pub async fn download(
    Path((crate_name, version)): Path<(String, Version)>,
    Extension(state): Extension<Arc<State>>,
) -> Result<BoxBody, StatusCode> {
    // `crate_name` comes straight from the URL; refuse anything that could
    // traverse outside `crate_dir` (separators or a `..` component).
    if crate_name.is_empty()
        || crate_name.contains('/')
        || crate_name.contains('\\')
        || crate_name.contains("..")
    {
        return Err(StatusCode::NOT_FOUND);
    }
    let mut path = state.crate_dir.clone();
    path.push(&crate_name);
    // `version` was parsed as semver, so its rendering is path-safe.
    path.push(version.to_string());
    path.push("crate.crate");
    // A missing file simply means this crate/version was never published.
    let file = match File::open(path).await {
        Ok(v) => v,
        Err(_) => return Err(StatusCode::NOT_FOUND),
    };
    Ok(BoxBody::new(StreamBody::new(ReaderStream::new(file))))
}

188
src/index.rs Normal file
View file

@ -0,0 +1,188 @@
use std::{collections::HashMap, process::Stdio, str::FromStr};
use futures_util::StreamExt;
use semver::{Version, VersionReq};
use serde::{Deserialize, Serialize};
use sqlx::query_as;
use tokio::{
fs::{self, OpenOptions},
io::AsyncWriteExt,
process::Command,
};
use crate::{
db::{DbDep, DbVersion, PgU32},
db_error, get_crate_prefix, internal_error, Errors, State, INDEX_LOCK,
};
/// One line of a crate's index file (newline-delimited JSON), in the schema
/// cargo consumes when resolving dependencies.
#[derive(Serialize, Deserialize)]
pub struct CrateVersion {
    pub name: String,
    pub vers: Version,
    pub deps: Vec<Dependency>,
    /// Hex-encoded SHA-256 of the `.crate` tarball.
    pub cksum: String,
    pub features: HashMap<String, Vec<String>>,
    pub yanked: bool,
    pub links: Option<String>,
    /// Should always be `1` for maximum compatibility
    pub v: u32,
    // `features2` exists, but for our purposes, it can be ignored. See https://doc.rust-lang.org/cargo/reference/registries.html
}
/// A dependency entry as written to the index.
#[derive(Serialize, Deserialize)]
pub struct Dependency {
    #[serde(flatten)]
    pub base: BaseDependency,
    /// The real crate name when the dependency is renamed in `Cargo.toml`
    /// (index field `package`).
    pub package: Option<String>,
}
/// Fields shared between the publish-API dependency object (deserialized in
/// `new_crate`) and the index dependency object (serialized here).
#[derive(Serialize, Deserialize)]
pub struct BaseDependency {
    pub name: String,
    /// The publish API sends this field as `version_req`, but the index
    /// format requires it to be called `req` — without the rename cargo
    /// cannot parse the generated index lines. `alias` keeps `req`
    /// accepted on input too.
    #[serde(rename(serialize = "req"), alias = "req")]
    pub version_req: VersionReq,
    pub features: Vec<String>,
    pub optional: bool,
    pub default_features: bool,
    pub target: Option<String>,
    pub kind: DependencyKind,
    pub registry: Option<String>,
}
#[derive(Serialize, Deserialize, Clone, Copy)]
pub enum DependencyKind {
Dev,
Build,
Normal,
}
impl DependencyKind {
pub fn to_db_repr(self) -> &'static str {
match self {
DependencyKind::Dev => "dev",
DependencyKind::Build => "build",
DependencyKind::Normal => "normal",
}
}
}
impl FromStr for DependencyKind {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"dev" => Ok(DependencyKind::Dev),
"build" => Ok(DependencyKind::Build),
"normal" => Ok(DependencyKind::Normal),
_ => Err(()),
}
}
}
/// Regenerates the index file for `crate_id` from the database and commits
/// it to the index git repository with `message`.
///
/// Holds `INDEX_LOCK` for the whole operation so concurrent publishes and
/// yanks cannot interleave their file writes and git commands.
pub async fn update_crate_from_db(
    crate_id: PgU32,
    state: &State,
    message: &str,
) -> Result<(), Errors> {
    let lock = INDEX_LOCK.lock().await;
    let mut db = state.db.acquire().await.map_err(db_error)?;
    let (mut crate_name,): (String,) = query_as("SELECT name FROM crates WHERE id = $1")
        .bind(crate_id)
        .fetch_one(&mut db)
        .await
        .map_err(db_error)?;
    // we use `fetch_all` here since we can't use `fetch` because of conflicting mutable borrows
    let versions = query_as::<_, DbVersion>("SELECT * FROM versions WHERE crate_id = $1")
        .bind(crate_id)
        .fetch_all(&mut db)
        .await
        .map_err(db_error)?;
    let mut versions2 = Vec::new();
    for version in versions {
        let mut deps = query_as::<_, DbDep>("SELECT * FROM deps WHERE version_id = $1")
            .bind(version.id)
            .fetch(&mut db);
        let mut deps2 = Vec::new();
        // Rebuild each stored dependency row as an index `Dependency`.
        while let Some(dep) = deps.next().await.transpose().map_err(db_error)? {
            deps2.push(Dependency {
                base: BaseDependency {
                    // The stored strings were produced from semver /
                    // `to_db_repr` at publish time, so re-parsing is
                    // expected to succeed; a corrupt row panics here.
                    name: dep.name,
                    version_req: dep.version_req.parse().unwrap(),
                    features: dep.features,
                    optional: dep.optional,
                    default_features: dep.default_features,
                    target: dep.target,
                    kind: dep.kind.parse().unwrap(),
                    registry: dep.registry,
                },
                package: dep.package,
            })
        }
        versions2.push(CrateVersion {
            name: crate_name.clone(),
            vers: version.vers.parse().unwrap(),
            deps: deps2,
            cksum: version.cksum,
            features: version
                .features
                .into_iter()
                .map(|v| (v.feature, v.enables))
                .collect(),
            yanked: version.yanked,
            links: version.links,
            v: 1,
        });
    }
    // Index file paths are always lowercase.
    crate_name.make_ascii_lowercase();
    let mut path = state.index_dir.clone();
    path.push(&get_crate_prefix(&crate_name).unwrap());
    fs::create_dir_all(&path).await.map_err(internal_error)?;
    path.push(&crate_name);
    // Rewrite the whole file: one JSON object per line, in database order.
    let mut file = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(&path)
        .await
        .map_err(internal_error)?;
    for version in versions2 {
        let mut buf = serde_json::to_vec(&version).map_err(internal_error)?;
        buf.push(b'\n');
        file.write_all(&buf).await.map_err(internal_error)?;
    }
    // NOTE(review): only spawn failures are propagated; a non-zero git exit
    // status is silently ignored — confirm that is acceptable.
    Command::new("git")
        .arg("add")
        .arg(&path)
        .current_dir(&state.index_dir)
        .stdin(Stdio::null())
        .stderr(Stdio::null())
        .stdout(Stdio::null())
        .status()
        .await
        .map_err(internal_error)?;
    Command::new("git")
        .arg("commit")
        .arg("-m")
        .arg(message)
        .current_dir(&state.index_dir)
        .stdin(Stdio::null())
        .stderr(Stdio::null())
        .stdout(Stdio::null())
        .status()
        .await
        .map_err(internal_error)?;
    drop(lock);
    Ok(())
}

256
src/main.rs Normal file
View file

@ -0,0 +1,256 @@
mod db;
mod download;
mod index;
mod new_crate;
mod owners;
mod search;
mod yank;
use async_trait::async_trait;
use axum::{
body::Body,
extract::{FromRequest, RequestParts},
http::{header::AUTHORIZATION, StatusCode},
response::{IntoResponse, Response},
routing::{delete, get, put},
Extension, Json, Router,
};
use db::DbUser;
use download::download;
use owners::{add_owners, list_owners, remove_owners};
use search::search;
use serde::{ser::SerializeStruct, Deserialize, Serialize};
use sqlx::{error::BoxDynError, postgres::PgPoolOptions, query_as, Pool, Postgres};
use std::{fmt::Display, net::SocketAddr, path::PathBuf, sync::Arc};
use tokio::{fs, sync::Mutex};
use tower::ServiceBuilder;
use tower_http::trace::TraceLayer;
use tracing_subscriber::filter::LevelFilter;
use yank::{unyank, yank};
/// JSON error payload in the shape cargo expects:
/// `{"errors": [{"detail": "..."}]}`.
#[derive(Serialize)]
pub struct Errors {
    errors: Vec<SingleError>,
}
impl Errors {
    /// Builds an error payload with a single message.
    fn new(v: impl ToString) -> Self {
        Self {
            errors: vec![SingleError {
                detail: v.to_string(),
            }],
        }
    }
    /// Builds an error payload with one entry per message.
    fn new_many(v: impl IntoIterator<Item = impl ToString>) -> Errors {
        Self {
            errors: v
                .into_iter()
                .map(|v| SingleError {
                    detail: v.to_string(),
                })
                .collect(),
        }
    }
}
/// Blanket conversion so any displayable error can be lifted into `Errors`.
impl<T: ToString> From<T> for Errors {
    fn from(v: T) -> Self {
        Self::new(v)
    }
}
// Responds with the JSON body (axum's `Json` response).
impl IntoResponse for Errors {
    fn into_response(self) -> Response {
        Json(self).into_response()
    }
}
// Allows `Errors` where a plain `Response` is required (e.g. `RespResult`).
impl From<Errors> for Response {
    fn from(v: Errors) -> Self {
        v.into_response()
    }
}
/// Handler result type: a success payload or an already-built error `Response`.
pub type RespResult<T = Success> = std::result::Result<T, Response>;
/// One entry of the `errors` array in an [`Errors`] payload.
#[derive(Serialize)]
struct SingleError {
    detail: String,
}
/// Computes the directory prefix under which a crate's index file lives,
/// following cargo's index layout: `1`, `2`, and `3/{c}` buckets for short
/// names, and `{aa}/{bb}` (first four characters, lowercased) otherwise.
///
/// Returns an error for empty or non-ASCII names.
fn get_crate_prefix(s: &str) -> Result<PathBuf, &'static str> {
    if s.is_empty() {
        return Err("Crate name must be non-empty");
    }
    if !s.is_ascii() {
        return Err("Crate name must be ASCII");
    }
    // ASCII was verified above, so byte slicing below never splits a char.
    let lower = s.to_ascii_lowercase();
    let prefix = match lower.len() {
        1 => PathBuf::from("1"),
        2 => PathBuf::from("2"),
        3 => {
            let mut p = PathBuf::from("3");
            p.push(&lower[0..1]);
            p
        }
        _ => {
            let mut p = PathBuf::from(&lower[0..2]);
            p.push(&lower[2..4]);
            p
        }
    };
    Ok(prefix)
}
/// Shared application state, wrapped in an `Arc` and injected into every
/// handler via `Extension`.
pub struct State {
    // Checkout of the index git repository: `{working_dir}/index`.
    index_dir: PathBuf,
    // Storage for uploaded `.crate` tarballs: `{working_dir}/crates`.
    crate_dir: PathBuf,
    db: Pool<Postgres>,
}
/// Serializes writers of the index repository; the file writes and git
/// commands in `index::update_crate_from_db` must not interleave.
static INDEX_LOCK: Mutex<()> = Mutex::const_new(());
/// Contents of `registry.toml` (see README for the expected keys).
#[derive(Deserialize)]
pub struct Config {
    working_dir: PathBuf,
    postgres_uri: String,
    listen_uri: SocketAddr,
}
/// Entry point: loads `registry.toml`, connects to Postgres, initializes the
/// schema, and serves the cargo registry web API on `listen_uri`.
#[tokio::main]
async fn main() -> Result<(), BoxDynError> {
    let config = toml::from_str::<Config>(
        &fs::read_to_string("registry.toml")
            .await
            .expect("Config file missing"),
    )
    .expect("Invalid config file");
    let mut index_dir = config.working_dir.clone();
    index_dir.push("index");
    let mut crate_dir = config.working_dir;
    crate_dir.push("crates");
    let state = Arc::new(State {
        index_dir,
        crate_dir,
        db: PgPoolOptions::new().connect(&config.postgres_uri).await?,
    });
    tracing_subscriber::fmt()
        .with_max_level(LevelFilter::TRACE)
        .init();
    // Best-effort schema creation + custom type OID lookup (see db::init).
    db::init(&state.db).await?;
    // Route paths follow the registry web API cargo expects.
    let app = Router::new()
        .route("/api/v1/crates/new", put(new_crate::new_crate))
        .route("/api/v1/crates/:crate_name/:version/yank", delete(yank))
        .route("/api/v1/crates/:crate_name/:version/unyank", put(unyank))
        .route(
            "/api/v1/crates/:crate_name/owners",
            get(list_owners).put(add_owners).delete(remove_owners),
        )
        .route(
            "/api/v1/crates/:crate_name/:version/download",
            get(download),
        )
        .route("/api/v1/crates", get(search))
        .layer(
            ServiceBuilder::new()
                .layer(Extension(state.clone()))
                .layer(TraceLayer::new_for_http()),
        );
    // Make sure the on-disk layout exists before accepting requests.
    fs::create_dir_all(&state.index_dir).await?;
    fs::create_dir_all(&state.crate_dir).await?;
    axum::Server::bind(&config.listen_uri)
        .serve(app.into_make_service())
        .await
        .unwrap();
    Ok(())
}
/// Extractor that authenticates a request via the raw `Authorization` header
/// (the token cargo stores with `cargo login`; no scheme prefix).
pub struct Auth(DbUser);
#[async_trait]
impl FromRequest<Body> for Auth {
    type Rejection = Response;
    async fn from_request(req: &mut RequestParts<Body>) -> Result<Self, Self::Rejection> {
        let Extension(state): Extension<Arc<State>> = Extension::from_request(req)
            .await
            .map_err(IntoResponse::into_response)?;
        if let Some(auth) = req
            .headers()
            .get(AUTHORIZATION)
            .and_then(|v| v.to_str().ok())
        {
            // NOTE(review): the token is matched by plain SQL equality, not
            // a constant-time comparison — confirm timing attacks are out
            // of scope for this deployment.
            if let Some(db_user) =
                sqlx::query_as::<_, DbUser>("SELECT * FROM users WHERE credential = $1 LIMIT 1")
                    .bind(auth)
                    .fetch_optional(&state.db)
                    .await
                    .map_err(db_error)
                    .map_err(IntoResponse::into_response)?
            {
                Ok(Self(db_user))
            } else {
                // Token not recognized.
                Err(StatusCode::FORBIDDEN.into_response())
            }
        } else {
            // Header missing or not valid UTF-8.
            Err(StatusCode::FORBIDDEN.into_response())
        }
    }
}
/// Checks that `auth_user` appears in the `owners` array of `crate_name`,
/// returning 403 otherwise.
///
/// NOTE(review): if the crate does not exist, `fetch_one` fails and this
/// surfaces as a "Database error" response rather than a not-found —
/// confirm that is intended.
pub async fn auth(crate_name: &str, auth_user: &DbUser, state: &State) -> Result<(), Response> {
    let (is_authenticated,): (bool,) =
        query_as("SELECT $1 = ANY (crates.owners) FROM crates WHERE name = $2")
            .bind(auth_user.id)
            .bind(&crate_name)
            .fetch_one(&state.db)
            .await
            .map_err(db_error)
            .map_err(IntoResponse::into_response)?;
    if !is_authenticated {
        return Err(StatusCode::FORBIDDEN.into_response());
    }
    Ok(())
}
/// Zero-field success marker; serializes to `{"ok": true}`.
pub struct Success;
impl Serialize for Success {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // Emitted as a one-field struct so it can also be
        // `#[serde(flatten)]`ed into larger responses (see owners.rs).
        let mut serialize_struct = serializer.serialize_struct("Struct", 1)?;
        serialize_struct.serialize_field("ok", &true)?;
        serialize_struct.end()
    }
}
impl IntoResponse for Success {
    fn into_response(self) -> Response {
        Json(self).into_response()
    }
}
fn internal_error<E: Display>(e: E) -> Errors {
Errors::new(format_args!("Internal server error: {e}"))
}
fn db_error<E: Display>(e: E) -> Errors {
Errors::new(format_args!("Database error: {e}"))
}

251
src/new_crate.rs Normal file
View file

@ -0,0 +1,251 @@
use crate::{
auth,
db::{DbVersionFeature, PgU32},
db_error,
index::{update_crate_from_db, BaseDependency},
internal_error, Auth, Errors, RespResult, State,
};
use async_trait::async_trait;
use axum::{
body::{Body, Bytes},
extract::{FromRequest, RequestParts},
Extension, Json,
};
use semver::Version;
use serde::{Deserialize, Serialize};
use sqlx::{query, query_as, types::Json as SqlxJson, Connection};
use std::{collections::HashMap, fmt::Write, sync::Arc};
use tokio::fs;
/// A parsed `PUT /api/v1/crates/new` body: cargo's publish wire format of
/// length-prefixed JSON metadata followed by the length-prefixed `.crate`
/// tarball.
pub struct NewCrateRequest {
    json_data: NewCrateJsonData,
    crate_file: Vec<u8>,
}
#[async_trait]
impl FromRequest<Body> for NewCrateRequest {
type Rejection = Errors;
async fn from_request(req: &mut RequestParts<Body>) -> Result<Self, Self::Rejection> {
let body = Bytes::from_request(req).await.unwrap();
let mut offset = 0;
let json_len_bytes = body
.get(offset..offset + 4)
.ok_or_else(|| Errors::new("Not enough bytes when reading length of JSON data"))?;
let json_len = u32::from_le_bytes(json_len_bytes.try_into().unwrap()) as usize;
offset += 4;
let json_bytes = body
.get(offset..offset + json_len)
.ok_or_else(|| Errors::new("Not enough bytes when reading JSON data"))?;
let json_data = serde_json::from_slice::<NewCrateJsonData>(json_bytes)
.map_err(|e| Errors::new(format_args!("Error while parsing JSON data: {e}")))?;
offset += json_len;
let crate_len_bytes = body
.get(offset..offset + 4)
.ok_or_else(|| Errors::new("Not enough bytes when reading length of .crate file"))?;
let crate_len = u32::from_le_bytes(crate_len_bytes.try_into().unwrap()) as usize;
offset += 4;
let crate_file = body
.get(offset..offset + crate_len)
.ok_or_else(|| Errors::new("Not enough bytes when reading .crate file"))?
.to_vec();
offset += crate_len;
if body.len() != offset {
return Err(Errors::new("Too much data provided"));
}
Ok(Self {
json_data,
crate_file,
})
}
}
/// Metadata JSON from cargo's publish request; the field set mirrors the
/// registry publish API.
#[derive(Deserialize)]
struct NewCrateJsonData {
    name: String,
    vers: Version,
    deps: Vec<Dependency2>,
    features: HashMap<String, Vec<String>>,
    authors: Vec<String>,
    description: Option<String>,
    documentation: Option<String>,
    homepage: Option<String>,
    readme: Option<String>,
    readme_file: Option<String>,
    keywords: Vec<String>,
    categories: Vec<String>,
    license: Option<String>,
    license_file: Option<String>,
    repository: Option<String>,
    badges: HashMap<String, HashMap<String, String>>,
    links: Option<String>,
}
/// Dependency object in the publish request: the fields shared with the
/// index plus the publish-only rename field.
#[derive(Deserialize)]
struct Dependency2 {
    #[serde(flatten)]
    base: BaseDependency,
    /// Present when the dependency is renamed in `Cargo.toml`.
    explicit_name_in_toml: Option<String>,
}
/// Response body for a successful publish.
#[derive(Serialize)]
pub struct NewCrateResponse {
    warnings: Warnings,
}
/// Warnings cargo displays after publishing; always empty here.
#[derive(Serialize)]
struct Warnings {
    invalid_categories: Vec<String>,
    invalid_badges: Vec<String>,
    other: Vec<String>,
}
pub async fn new_crate(
mut request: NewCrateRequest,
Auth(auth_user): Auth,
Extension(state): Extension<Arc<State>>,
) -> RespResult<Json<NewCrateResponse>> {
if !request.json_data.name.is_ascii() {
return Err(Errors::new("Crate name must be ASCII").into());
}
request.json_data.name.make_ascii_lowercase();
let mut db = state.db.acquire().await.map_err(db_error)?;
let crate_id = {
let mut trans = db.begin().await.map_err(db_error)?;
let (crate_id,): (PgU32,) = match query_as("SELECT id FROM crates WHERE name = $1 LIMIT 1")
.bind(&request.json_data.name)
.fetch_optional(&mut *trans)
.await
.map_err(db_error)?
{
Some(v) => {
auth(&request.json_data.name, &auth_user, &state).await?;
v
}
None => query_as(
"INSERT INTO crates (name, publisher, owners) VALUES ($1, $2, $3) RETURNING id",
)
.bind(&request.json_data.name)
.bind(auth_user.id)
.bind(&[auth_user.id][..])
.fetch_one(&mut *trans)
.await
.map_err(db_error)?,
};
let hash = hmac_sha256::Hash::hash(&request.crate_file);
let mut cksum = String::new();
for byte in hash {
write!(cksum, "{byte:02x}").expect("Formatting to a String failed");
}
let (version_id,): (PgU32,) = query_as(
r"INSERT INTO versions (
vers, cksum, yanked, links,
crate_id, features,
authors, description, documentation,
homepage, readme, readme_file,
keywords, categories,
license, license_file,
repository, badges
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18)
RETURNING id",
)
.bind(&request.json_data.vers.to_string())
.bind(&cksum)
.bind(false)
.bind(&request.json_data.links)
.bind(crate_id)
.bind(
&request
.json_data
.features
.iter()
.map(|v| DbVersionFeature {
feature: v.0.clone(),
enables: v.1.clone(),
})
.collect::<Vec<_>>(),
)
.bind(&request.json_data.authors)
.bind(&request.json_data.description)
.bind(&request.json_data.documentation)
.bind(&request.json_data.homepage)
.bind(&request.json_data.readme)
.bind(&request.json_data.readme_file)
.bind(&request.json_data.keywords)
.bind(&request.json_data.categories)
.bind(&request.json_data.license)
.bind(&request.json_data.license_file)
.bind(&request.json_data.repository)
.bind(SqlxJson(&request.json_data.badges))
.fetch_one(&mut *trans)
.await
.map_err(db_error)?;
for dep in &request.json_data.deps {
query(
r"INSERT INTO deps (
name, version_req, optional, default_features,
target, kind, registry, package, feature, version_id
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)",
)
.bind(&dep.base.name)
.bind(&dep.base.version_req.to_string())
.bind(&dep.base.optional)
.bind(&dep.base.default_features)
.bind(&dep.base.target)
.bind(&dep.base.kind.to_db_repr())
.bind(&dep.base.registry)
.bind(&dep.explicit_name_in_toml)
.bind(&dep.base.features)
.bind(version_id)
.execute(&mut *trans)
.await
.map_err(db_error)?;
}
trans.commit().await.map_err(db_error)?;
crate_id
};
update_crate_from_db(
crate_id,
&state,
&format!(
"Add version {} of '{}'",
request.json_data.vers, request.json_data.name
),
)
.await?;
let mut path = state.crate_dir.clone();
path.push(&request.json_data.name);
path.push(request.json_data.vers.to_string());
fs::create_dir_all(&path).await.map_err(internal_error)?;
path.push("crate.crate");
fs::write(path, request.crate_file)
.await
.map_err(internal_error)?;
Ok(Json(NewCrateResponse {
warnings: Warnings {
invalid_categories: Vec::new(),
invalid_badges: Vec::new(),
other: Vec::new(),
},
}))
}

135
src/owners.rs Normal file
View file

@ -0,0 +1,135 @@
use std::sync::Arc;
use async_trait::async_trait;
use axum::{
body::HttpBody,
extract::{FromRequest, Path, RequestParts},
BoxError, Extension, Json as AxumJson,
};
use futures_util::StreamExt;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use sqlx::{query, query_as};
use crate::{auth, db::PgU32, db_error, Auth, Errors, RespResult, State, Success};
// TODO: custom `Json` errors
/// Wrapper around `axum::Json` that turns extraction rejections into the
/// registry's JSON `Errors` body instead of axum's plain-text response.
pub struct Json<T>(T);
#[async_trait]
impl<T, B> FromRequest<B> for Json<T>
where
    T: DeserializeOwned,
    B: HttpBody + Send,
    B::Data: Send,
    B::Error: Into<BoxError>,
{
    type Rejection = Errors;
    async fn from_request(req: &mut RequestParts<B>) -> Result<Self, Self::Rejection> {
        let res = AxumJson::<T>::from_request(req).await;
        match res {
            Ok(AxumJson(v)) => Ok(Json(v)),
            Err(e) => Err(Errors::new(format_args!("Invalid json data: {e}"))),
        }
    }
}
/// Response body of the owners listing endpoint.
#[derive(Serialize)]
pub struct ListOwnersResponse {
    users: Vec<User>,
}
/// Public view of one owner (credential deliberately excluded).
#[derive(Serialize)]
struct User {
    id: u32,
    login: String,
    name: Option<String>,
}
/// `GET /api/v1/crates/:crate_name/owners` — lists a crate's owners.
/// Requires a valid token but not ownership of the crate.
pub async fn list_owners(
    Path(crate_name): Path<String>,
    Auth(_auth_user): Auth,
    Extension(state): Extension<Arc<State>>,
) -> RespResult<AxumJson<ListOwnersResponse>> {
    // Join users against the crate's `owners` int[] column.
    let mut users = query_as("SELECT users.id, users.login, users.name FROM crates INNER JOIN users ON users.id = ANY (crates.owners) WHERE crates.name = $1")
        .bind(&crate_name)
        .fetch(&state.db);
    let mut users2 = Vec::new();
    while let Some(user) = users.next().await.transpose().map_err(db_error)? {
        // Pin the row tuple's type so `query_as` can infer it; the fields
        // are then mapped manually into the serializable `User`.
        let _: (PgU32, _, _) = user;
        users2.push(User {
            id: user.0 .0,
            login: user.1,
            name: user.2,
        });
    }
    Ok(AxumJson(ListOwnersResponse { users: users2 }))
}
/// Request body: login names of the owners to add or remove.
#[derive(Deserialize)]
pub struct AddRemoveOwnersRequest {
    users: Vec<String>,
}
/// Response body: `{"ok": true, "msg": "..."}`.
#[derive(Serialize)]
pub struct AddRemoveOwnersResponse {
    // Flattened so the body gains a top-level `"ok": true` field.
    #[serde(flatten)]
    success: Success,
    msg: String,
}
/// `PUT /api/v1/crates/:crate_name/owners` — adds owners by login name.
pub async fn add_owners(
    Json(request): Json<AddRemoveOwnersRequest>,
    Path(crate_name): Path<String>,
    Auth(auth_user): Auth,
    Extension(state): Extension<Arc<State>>,
) -> RespResult<AxumJson<AddRemoveOwnersResponse>> {
    // Only current owners may modify the owner list.
    auth(&crate_name, &auth_user, &state).await?;
    // Appends the ids of all users whose login appears in $1.
    // NOTE(review): logins that are already owners get duplicated in the
    // array, and if no login matches, `array_agg` yields NULL — verify the
    // resulting `owners || NULL` concatenation behaves as intended.
    query(
        r"UPDATE crates SET owners = owners || (
        SELECT array_agg(users.id) FROM users INNER JOIN unnest($1) ON users.login = unnest
    ) WHERE name = $2",
    )
    .bind(&request.users)
    .bind(&crate_name)
    .execute(&state.db)
    .await
    .map_err(db_error)?;
    Ok(AxumJson(AddRemoveOwnersResponse {
        success: Success,
        msg: format!("adding {:?} to crate {}", request.users, crate_name),
    }))
}
/// `DELETE /api/v1/crates/:crate_name/owners` — removes owners by login name.
pub async fn remove_owners(
    Json(request): Json<AddRemoveOwnersRequest>,
    Path(crate_name): Path<String>,
    Auth(auth_user): Auth,
    Extension(state): Extension<Arc<State>>,
) -> RespResult<AxumJson<AddRemoveOwnersResponse>> {
    auth(&crate_name, &auth_user, &state).await?;
    // Rebuilds `owners` as (current owners) EXCEPT (ids of the given logins).
    // NOTE(review): nothing prevents removing every owner, including
    // yourself, which would leave the crate unmanageable — confirm intended.
    query(
        r"UPDATE crates SET owners = (
        SELECT array_agg(unnest) FROM (
        SELECT unnest(owners) EXCEPT (SELECT users.id FROM users INNER JOIN unnest($1) ON users.login = unnest)
        ) t (unnest)
    ) WHERE name = $2",
    )
    .bind(&request.users)
    .bind(&crate_name)
    .execute(&state.db)
    .await
    .map_err(db_error)?;
    Ok(AxumJson(AddRemoveOwnersResponse {
        success: Success,
        msg: format!("removed {:?} from crate {}", request.users, crate_name),
    }))
}

95
src/search.rs Normal file
View file

@ -0,0 +1,95 @@
use std::sync::Arc;
use async_trait::async_trait;
use axum::{
extract::{FromRequest, Query as AxumQuery, RequestParts},
Extension, Json,
};
use semver::Version;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use sqlx::query_as;
use crate::{db::PgU32, db_error, Errors, RespResult, State};
/// Wrapper around `axum::extract::Query` that turns rejections into the
/// registry's JSON `Errors` body.
pub struct Query<T>(T);
#[async_trait]
impl<T, B> FromRequest<B> for Query<T>
where
    T: DeserializeOwned,
    B: Send,
{
    type Rejection = Errors;
    async fn from_request(req: &mut RequestParts<B>) -> Result<Self, Self::Rejection> {
        let res = AxumQuery::<T>::from_request(req).await;
        match res {
            Ok(AxumQuery(v)) => Ok(Query(v)),
            Err(e) => Err(Errors::new(format_args!("Invalid query options: {e}"))),
        }
    }
}
/// Query-string parameters of `GET /api/v1/crates`.
#[derive(Deserialize)]
pub struct SearchQueryParams {
    /// Search term.
    q: String,
    /// Maximum number of results; defaults to 10 in `search`.
    per_page: Option<u8>,
}
/// Response body of the search endpoint.
#[derive(Serialize)]
pub struct SearchResponse {
    crates: Vec<Crate>,
    meta: Meta,
}
/// One search result.
#[derive(Serialize)]
pub struct Crate {
    name: String,
    max_version: Version,
    description: String,
}
#[derive(Serialize)]
pub struct Meta {
    /// Total number of matches; currently always 0 (see TODO in `search`).
    total: u32,
}
pub async fn search(
Query(params): Query<SearchQueryParams>,
Extension(state): Extension<Arc<State>>,
) -> RespResult<Json<SearchResponse>> {
let crates = query_as::<_, (PgU32, String)>(
"SELECT id, name FROM crates ORDER BY SIMILARITY(name, $1) DESC LIMIT $2;",
)
.bind(&params.q)
.bind(i16::from(params.per_page.unwrap_or(10)))
.fetch_all(&state.db)
.await
.map_err(db_error)?;
let mut crates2 = Vec::new();
for craet in crates {
let v: Option<(String, Option<String>)> = query_as(
"SELECT vers, description FROM versions WHERE crate_id = $1 ORDER BY vers DESC LIMIT 1",
)
.bind(craet.0)
.fetch_optional(&state.db)
.await
.map_err(db_error)?;
if let Some((vers, desc)) = v {
crates2.push(Crate {
name: craet.1,
max_version: vers.parse().unwrap(),
description: desc.unwrap_or_else(|| "".to_owned()),
})
}
}
Ok(Json(SearchResponse {
crates: crates2,
// TODO: total
meta: Meta { total: 0 },
}))
}

86
src/yank.rs Normal file
View file

@ -0,0 +1,86 @@
use std::sync::Arc;
use async_trait::async_trait;
use axum::{
extract::{FromRequest, Path as AxumPath, RequestParts},
Extension,
};
use semver::Version;
use serde::de::DeserializeOwned;
use sqlx::query_as;
use crate::{
auth, db::PgU32, db_error, index::update_crate_from_db, Auth, Errors, RespResult, State,
Success,
};
/// Wrapper around `axum::extract::Path` that turns rejections into the
/// registry's JSON `Errors` body.
pub struct Path<T>(T);
#[async_trait]
impl<T, B> FromRequest<B> for Path<T>
where
    T: DeserializeOwned + Send,
    B: Send,
{
    type Rejection = Errors;
    async fn from_request(req: &mut RequestParts<B>) -> Result<Self, Self::Rejection> {
        let res = AxumPath::<T>::from_request(req).await;
        match res {
            Ok(AxumPath(v)) => Ok(Path(v)),
            Err(e) => Err(Errors::new(format_args!("Invalid path fragments: {e}"))),
        }
    }
}
/// `DELETE /api/v1/crates/:crate_name/:version/yank` — marks one version as
/// yanked. Only owners may yank.
pub async fn yank(
    Path((crate_name, version)): Path<(String, Version)>,
    Auth(auth_user): Auth,
    Extension(state): Extension<Arc<State>>,
) -> RespResult {
    auth(&crate_name, &auth_user, &state).await?;
    do_yank(&crate_name, &version, &state, true).await
}
/// `PUT /api/v1/crates/:crate_name/:version/unyank` — clears the yanked
/// flag. Only owners may unyank.
pub async fn unyank(
    Path((crate_name, version)): Path<(String, Version)>,
    Auth(auth_user): Auth,
    Extension(state): Extension<Arc<State>>,
) -> RespResult {
    auth(&crate_name, &auth_user, &state).await?;
    do_yank(&crate_name, &version, &state, false).await
}
async fn do_yank(crate_name: &str, version: &Version, state: &State, yank: bool) -> RespResult {
let (crate_id,): (PgU32,) = query_as(
r"UPDATE versions SET yanked = $1
FROM crates
WHERE crates.id = versions.crate_id
and crates.name = $2
and versions.vers = $3
RETURNING crates.id",
)
.bind(yank)
.bind(crate_name)
.bind(&version.to_string())
.fetch_one(&state.db)
.await
.map_err(db_error)?;
update_crate_from_db(
crate_id,
state,
&format!(
"{} version {} of `{}`",
if yank { "Yank" } else { "Unyank" },
version,
crate_name
),
)
.await?;
Ok(Success)
}