pub struct AppValidationWorkspace { /* private fields */ }
Implementations§
source§impl AppValidationWorkspace
impl AppValidationWorkspace
pub fn new(
authored_db: DbRead<DbKindAuthored>,
dht_db: DbWrite<DbKindDht>,
dht_db_cache: DhtDbQueryCache,
cache: DbWrite<DbKindCache>,
keystore: MetaLairClient,
dna_def: Arc<DnaDef>
) -> Self
pub fn new(
authored_db: DbRead<DbKindAuthored>,
dht_db: DbWrite<DbKindDht>,
dht_db_cache: DhtDbQueryCache,
cache: DbWrite<DbKindCache>,
keystore: MetaLairClient,
dna_def: Arc<DnaDef>
) -> Self
Examples found in repository:
src/core/queue_consumer.rs (lines 171-178)
/// Spawn every queue-consumer task needed by one cell and wire their
/// triggers together.
///
/// Spawns (or reuses, via `queue_consumer_map.spawn_once_*`) consumers for:
/// publish, validation receipt, integration, app validation, sys validation
/// and countersigning. Each spawned task handle is registered with
/// `task_sender` as cell-critical so the conductor can manage its lifetime.
/// Returns the long-lived `QueueTriggers` plus the `InitialQueueTriggers`
/// used to kick the workflows once at startup.
///
/// NOTE(review): several `.expect(...)` calls below panic if the managed-task
/// channel is closed — presumably acceptable because that only happens during
/// conductor shutdown; confirm.
pub async fn spawn_queue_consumer_tasks(
    cell_id: CellId,
    network: HolochainP2pDna,
    space: &Space,
    conductor_handle: ConductorHandle,
    task_sender: sync::mpsc::Sender<ManagedTaskAdd>,
    stop: sync::broadcast::Sender<()>,
) -> (QueueTriggers, InitialQueueTriggers) {
    // Borrow the space's databases and caches for local use.
    let Space {
        authored_db,
        dht_db,
        cache_db: cache,
        dht_query_cache,
        ..
    } = space;
    let keystore = conductor_handle.keystore().clone();
    let dna_hash = Arc::new(cell_id.dna_hash().clone());
    let queue_consumer_map = conductor_handle.get_queue_consumer_workflows();
    // Publish
    // Spawned unconditionally (one per cell, keyed by the agent key).
    let (tx_publish, handle) = spawn_publish_dht_ops_consumer(
        cell_id.agent_pubkey().clone(),
        authored_db.clone(),
        conductor_handle.clone(),
        stop.subscribe(),
        Box::new(network.clone()),
    );
    task_sender
        .send(ManagedTaskAdd::cell_critical(
            handle,
            cell_id.clone(),
            "publish_dht_ops_consumer",
        ))
        .await
        .expect("Failed to manage workflow handle");
    // Validation Receipt
    // One per space.
    // `spawn_once_*` returns `None` for the handle when a consumer for this
    // DNA already exists, in which case only the trigger is reused.
    let (tx_receipt, handle) =
        queue_consumer_map.spawn_once_validation_receipt(dna_hash.clone(), || {
            spawn_validation_receipt_consumer(
                dna_hash.clone(),
                dht_db.clone(),
                conductor_handle.clone(),
                stop.subscribe(),
                network.clone(),
            )
        });
    if let Some(handle) = handle {
        task_sender
            .send(ManagedTaskAdd::cell_critical(
                handle,
                cell_id.clone(),
                "validation_receipt_consumer",
            ))
            .await
            .expect("Failed to manage workflow handle");
    }
    // Integration
    // One per space.
    let (tx_integration, handle) =
        queue_consumer_map.spawn_once_integration(dna_hash.clone(), || {
            spawn_integrate_dht_ops_consumer(
                dna_hash.clone(),
                dht_db.clone(),
                dht_query_cache.clone(),
                stop.subscribe(),
                tx_receipt.clone(),
                network.clone(),
            )
        });
    if let Some(handle) = handle {
        task_sender
            .send(ManagedTaskAdd::cell_critical(
                handle,
                cell_id.clone(),
                "integrate_dht_ops_consumer",
            ))
            .await
            .expect("Failed to manage workflow handle");
    }
    // The DNA definition must already be in the store; this cell could not
    // have been created otherwise.
    let dna_def = conductor_handle
        .get_dna_def(&*dna_hash)
        .expect("Dna must be in store");
    // App validation
    // One per space.
    let (tx_app, handle) = queue_consumer_map.spawn_once_app_validation(dna_hash.clone(), || {
        spawn_app_validation_consumer(
            dna_hash.clone(),
            AppValidationWorkspace::new(
                authored_db.clone().into(),
                dht_db.clone(),
                // NOTE(review): uses `space.dht_query_cache` here but the
                // destructured `dht_query_cache` everywhere else — same value,
                // but inconsistent; consider unifying.
                space.dht_query_cache.clone(),
                cache.clone(),
                keystore.clone(),
                Arc::new(dna_def),
            ),
            conductor_handle.clone(),
            stop.subscribe(),
            tx_integration.clone(),
            network.clone(),
            dht_query_cache.clone(),
        )
    });
    if let Some(handle) = handle {
        task_sender
            .send(ManagedTaskAdd::cell_critical(
                handle,
                cell_id.clone(),
                "app_validation_consumer",
            ))
            .await
            .expect("Failed to manage workflow handle");
    }
    // NOTE(review): second fetch of the same DNA def — the first was moved
    // into the app-validation workspace above; could be fetched once and
    // cloned instead.
    let dna_def = conductor_handle
        .get_dna_def(&*dna_hash)
        .expect("Dna must be in store");
    // Sys validation
    // One per space.
    let (tx_sys, handle) = queue_consumer_map.spawn_once_sys_validation(dna_hash.clone(), || {
        spawn_sys_validation_consumer(
            SysValidationWorkspace::new(
                authored_db.clone().into(),
                dht_db.clone().into(),
                dht_query_cache.clone(),
                cache.clone(),
                Arc::new(dna_def),
            ),
            space.clone(),
            conductor_handle.clone(),
            stop.subscribe(),
            // Sys validation feeds accepted ops onward to app validation.
            tx_app.clone(),
            network.clone(),
        )
    });
    if let Some(handle) = handle {
        task_sender
            .send(ManagedTaskAdd::cell_critical(
                handle,
                cell_id.clone(),
                "sys_validation_consumer",
            ))
            .await
            .expect("Failed to manage workflow handle");
    }
    // Countersigning feeds into sys validation via `tx_sys`.
    let (tx_cs, handle) = queue_consumer_map.spawn_once_countersigning(dna_hash.clone(), || {
        spawn_countersigning_consumer(
            space.clone(),
            stop.subscribe(),
            network.clone(),
            tx_sys.clone(),
        )
    });
    if let Some(handle) = handle {
        task_sender
            .send(ManagedTaskAdd::cell_critical(
                handle,
                cell_id.clone(),
                "countersigning_consumer",
            ))
            .await
            .expect("Failed to manage workflow handle");
    }
    // Hand back the triggers: the clones for ongoing use, the originals for
    // the one-shot initial triggering.
    (
        QueueTriggers {
            sys_validation: tx_sys.clone(),
            publish_dht_ops: tx_publish.clone(),
            countersigning: tx_cs,
            integrate_dht_ops: tx_integration.clone(),
        },
        InitialQueueTriggers::new(tx_sys, tx_publish, tx_app, tx_integration, tx_receipt),
    )
}
pub async fn validation_workspace(
&self
) -> Result<HostFnWorkspaceRead, AppValidationError>
pub async fn validation_workspace(
&self
) -> Result<HostFnWorkspaceRead, AppValidationError>
Examples found in repository:
src/core/workflow/app_validation_workflow.rs (line 437)
/// Resolve the host-function workspace and the ribosome for this DNA, then
/// delegate the actual validation of `op` to `validate_op`.
async fn validate_op_outer(
    dna_hash: Arc<DnaHash>,
    op: &Op,
    conductor_handle: &ConductorHandle,
    workspace: &AppValidationWorkspace,
    network: &HolochainP2pDna,
) -> AppValidationOutcome<Outcome> {
    // Read-only workspace backing the host functions invoked during validation.
    let hfw = workspace.validation_workspace().await?;
    // A missing ribosome means the DNA itself is absent from this conductor.
    let ribosome = conductor_handle
        .get_ribosome(dna_hash.as_ref())
        .map_err(|_e| AppValidationError::DnaMissing(dna_hash.as_ref().clone()))?;
    validate_op(op, hfw, network, &ribosome).await
}
pub fn full_cascade<Network: HolochainP2pDnaT + Clone + 'static + Send>(
&self,
network: Network
) -> Cascade<Network>
pub fn full_cascade<Network: HolochainP2pDnaT + Clone + 'static + Send>(
&self,
network: Network
) -> Cascade<Network>
Examples found in repository:
src/core/workflow/app_validation_workflow.rs (line 122)
/// Inner loop of the app-validation workflow: pull all ops awaiting app
/// validation from the DHT database, validate them concurrently, and commit
/// the outcomes (integrated / integration limbo / validation limbo) back to
/// the database in batches.
///
/// Returns `WorkComplete::Incomplete` when the queue was saturated
/// (`start_len >= NUM_CONCURRENT_OPS`) so the consumer will trigger another
/// run immediately.
async fn app_validation_workflow_inner(
    dna_hash: Arc<DnaHash>,
    workspace: Arc<AppValidationWorkspace>,
    conductor: ConductorHandle,
    network: &HolochainP2pDna,
    dht_query_cache: DhtDbQueryCache,
) -> WorkflowResult<WorkComplete> {
    let db = workspace.dht_db.clone().into();
    let sorted_ops = validation_query::get_ops_to_app_validate(&db).await?;
    let start_len = sorted_ops.len();
    tracing::debug!("validating {} ops", start_len);
    // Only measure throughput when there is enough work to saturate the
    // concurrency limit.
    let start = (start_len >= NUM_CONCURRENT_OPS).then(std::time::Instant::now);
    let saturated = start.is_some();
    // Validate all the ops
    let iter = sorted_ops.into_iter().map({
        let network = network.clone();
        let workspace = workspace.clone();
        move |so| {
            // Per-op clones so each future is 'static and independent.
            let network = network.clone();
            let conductor = conductor.clone();
            let workspace = workspace.clone();
            let dna_hash = dna_hash.clone();
            async move {
                let (op, op_hash) = so.into_inner();
                let op_type = op.get_type();
                let action = op.action();
                let dependency = get_dependency(op_type, &action);
                let op_light = op.to_light();
                // If this is agent activity, track it for the cache.
                let activity = matches!(op_type, DhtOpType::RegisterAgentActivity).then(|| {
                    (
                        action.author().clone(),
                        action.action_seq(),
                        matches!(dependency, Dependency::Null),
                    )
                });
                // Validate this op
                let mut cascade = workspace.full_cascade(network.clone());
                let r = match dhtop_to_op(op, &mut cascade).await {
                    Ok(op) => {
                        validate_op_outer(dna_hash, &op, &conductor, &(*workspace), &network).await
                    }
                    Err(e) => Err(e),
                };
                (op_hash, dependency, op_light, r, activity)
            }
        }
    });
    // Create a stream of concurrent validation futures.
    // This will run NUM_CONCURRENT_OPS validation futures concurrently and
    // return up to NUM_CONCURRENT_OPS * 100 results.
    let mut iter = futures::stream::iter(iter)
        .buffer_unordered(NUM_CONCURRENT_OPS)
        .ready_chunks(NUM_CONCURRENT_OPS * 100);
    // Spawn a task to actually drive the stream.
    // This allows the stream to make progress in the background while
    // we are committing previous results to the database.
    let (tx, rx) = tokio::sync::mpsc::channel(NUM_CONCURRENT_OPS * 100);
    let jh = tokio::spawn(async move {
        while let Some(op) = iter.next().await {
            // Send the result to task that will commit to the database.
            if tx.send(op).await.is_err() {
                tracing::warn!("app validation task has failed to send ops. This is not a problem if the conductor is shutting down");
                break;
            }
        }
    });
    // Create a stream that will chunk up to NUM_CONCURRENT_OPS * 100 ready results.
    let mut iter =
        tokio_stream::wrappers::ReceiverStream::new(rx).ready_chunks(NUM_CONCURRENT_OPS * 100);
    let mut total = 0;
    let mut round_time = start.is_some().then(std::time::Instant::now);
    // Pull in a chunk of results.
    while let Some(chunk) = iter.next().await {
        tracing::debug!(
            "Committing {} ops",
            chunk.iter().map(|c| c.len()).sum::<usize>()
        );
        // Commit this chunk's outcomes in a single transaction; the closure
        // returns (accepted, awaiting, rejected, agent-activity) counts.
        let (t, a, r, activity) = workspace
            .dht_db
            .async_commit(move |txn| {
                let mut total = 0;
                let mut awaiting = 0;
                let mut rejected = 0;
                let mut agent_activity = Vec::new();
                for outcome in chunk.into_iter().flatten() {
                    let (op_hash, dependency, op_light, outcome, activity) = outcome;
                    // Get the outcome or return the error
                    let outcome = outcome.or_else(|outcome_or_err| outcome_or_err.try_into())?;
                    // Collect all agent activity.
                    if let Some(activity) = activity {
                        // If the activity is accepted or rejected then it's ready to integrate.
                        if matches!(&outcome, Outcome::Accepted | Outcome::Rejected(_)) {
                            agent_activity.push(activity);
                        }
                    }
                    if let Outcome::AwaitingDeps(_) | Outcome::Rejected(_) = &outcome {
                        warn!(
                            msg = "DhtOp has failed app validation",
                            outcome = ?outcome,
                        );
                    }
                    match outcome {
                        Outcome::Accepted => {
                            total += 1;
                            // Ops with no dependency are integrated directly;
                            // others wait in integration limbo.
                            if let Dependency::Null = dependency {
                                put_integrated(txn, &op_hash, ValidationStatus::Valid)?;
                            } else {
                                put_integration_limbo(txn, &op_hash, ValidationStatus::Valid)?;
                            }
                        }
                        Outcome::AwaitingDeps(deps) => {
                            awaiting += 1;
                            let status = ValidationLimboStatus::AwaitingAppDeps(deps);
                            put_validation_limbo(txn, &op_hash, status)?;
                        }
                        Outcome::Rejected(_) => {
                            rejected += 1;
                            tracing::warn!("Received invalid op! Warrants aren't implemented yet, so we can't do anything about this right now, but be warned that somebody on the network has maliciously hacked their node.\nOp: {:?}", op_light);
                            if let Dependency::Null = dependency {
                                put_integrated(txn, &op_hash, ValidationStatus::Rejected)?;
                            } else {
                                put_integration_limbo(txn, &op_hash, ValidationStatus::Rejected)?;
                            }
                        }
                    }
                }
                WorkflowResult::Ok((total, awaiting, rejected, agent_activity))
            })
            .await?;
        // Once the database transaction is committed, add agent activity to the cache
        // that is ready for integration.
        for (author, seq, has_no_dependency) in activity {
            // Any activity with no dependency is integrated in this workflow.
            // TODO: This will no longer be true when [#1212](https://github.com/holochain/holochain/pull/1212) lands.
            if has_no_dependency {
                dht_query_cache
                    .set_activity_to_integrated(&author, seq)
                    .await?;
            } else {
                dht_query_cache
                    .set_activity_ready_to_integrate(&author, seq)
                    .await?;
            }
        }
        total += t;
        if let (Some(start), Some(round_time)) = (start, &mut round_time) {
            let round_el = round_time.elapsed();
            *round_time = std::time::Instant::now();
            // Ops/second, overall and for this round (elapsed is in micros).
            let avg_ops_ps = total as f64 / start.elapsed().as_micros() as f64 * 1_000_000.0;
            let ops_ps = t as f64 / round_el.as_micros() as f64 * 1_000_000.0;
            tracing::warn!(
                "App validation is saturated. Util {:.2}%. OPS/s avg {:.2}, this round {:.2}",
                // NOTE(review): this "utilization" is remaining-ops divided by
                // the concurrency limit, which can exceed 100% — confirm the
                // formula is intentional.
                (start_len - total) as f64 / NUM_CONCURRENT_OPS as f64 * 100.0,
                avg_ops_ps,
                ops_ps
            );
        }
        tracing::debug!(
            "{} committed, {} awaiting sys dep, {} rejected. {} committed this round",
            t,
            a,
            r,
            total
        );
    }
    // The driver task has finished once the channel closes; surface any panic.
    jh.await?;
    tracing::debug!("accepted {} ops", total);
    Ok(if saturated {
        WorkComplete::Incomplete
    } else {
        WorkComplete::Complete
    })
}
Auto Trait Implementations§
impl !RefUnwindSafe for AppValidationWorkspace
impl Send for AppValidationWorkspace
impl Sync for AppValidationWorkspace
impl Unpin for AppValidationWorkspace
impl !UnwindSafe for AppValidationWorkspace
Blanket Implementations§
impl<T> Any for T where
T: Any + ?Sized,
impl<T> Any for T where
T: Any + ?Sized,
§fn type_id_compat(&self) -> TypeId
fn type_id_compat(&self) -> TypeId
TODO: once 1.33.0 is the minimum supported compiler version, remove
Any::type_id_compat and use StdAny::type_id instead.
https://github.com/rust-lang/rust/issues/27745
§impl<T> ArchivePointee for T
impl<T> ArchivePointee for T
§type ArchivedMetadata = ()
type ArchivedMetadata = ()
The archived version of the pointer metadata for this type.
§fn pointer_metadata(
_: &<T as ArchivePointee>::ArchivedMetadata
) -> <T as Pointee>::Metadata
fn pointer_metadata(
_: &<T as ArchivePointee>::ArchivedMetadata
) -> <T as Pointee>::Metadata
Converts some archived metadata to the pointer metadata for itself.
impl<F, W, T, D> Deserialize<With<T, W>, D> for F where
W: DeserializeWith<F, T, D>,
D: Fallible + ?Sized,
F: ?Sized,
impl<F, W, T, D> Deserialize<With<T, W>, D> for F where
W: DeserializeWith<F, T, D>,
D: Fallible + ?Sized,
F: ?Sized,
§fn deserialize(
&self,
deserializer: &mut D
) -> Result<With<T, W>, <D as Fallible>::Error>
fn deserialize(
&self,
deserializer: &mut D
) -> Result<With<T, W>, <D as Fallible>::Error>
Deserializes using the given deserializer
§impl<T> FutureExt for T
impl<T> FutureExt for T
§fn with_context(self, otel_cx: Context) -> WithContext<Self> ⓘ
fn with_context(self, otel_cx: Context) -> WithContext<Self> ⓘ
§fn with_current_context(self) -> WithContext<Self> ⓘ
fn with_current_context(self) -> WithContext<Self> ⓘ
source§impl<T> Instrument for T
impl<T> Instrument for T
source§fn instrument(self, span: Span) -> Instrumented<Self> ⓘ
fn instrument(self, span: Span) -> Instrumented<Self> ⓘ
source§fn in_current_span(self) -> Instrumented<Self> ⓘ
fn in_current_span(self) -> Instrumented<Self> ⓘ
source§impl<T> Instrument for T
impl<T> Instrument for T
source§fn instrument(self, span: Span) -> Instrumented<Self> ⓘ
fn instrument(self, span: Span) -> Instrumented<Self> ⓘ
source§fn in_current_span(self) -> Instrumented<Self> ⓘ
fn in_current_span(self) -> Instrumented<Self> ⓘ
§impl<T> Pointable for T
impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP where
SS: SubsetOf<SP>,
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct `self` from the equivalent element of its superset. Read more
fn is_in_subset(&self) -> bool
Checks if `self` is actually part of its subset `T` (and can be converted to it).
fn to_subset_unchecked(&self) -> SS
Use with care! Same as `self.to_subset` but without any property checks. Always succeeds.
fn from_subset(element: &SS) -> SP
The inclusion map: converts `self` to the equivalent element of its superset.
impl<T> Upcastable for T where
T: 'static + Any + Send + Sync,
§fn upcast_any_ref(&self) -> &(dyn Any + 'static)
fn upcast_any_ref(&self) -> &(dyn Any + 'static)
upcast ref
§fn upcast_any_mut(&mut self) -> &mut (dyn Any + 'static)
fn upcast_any_mut(&mut self) -> &mut (dyn Any + 'static)
upcast mut ref