spark_sdk/wallet/internal_handlers/implementations/timelock.rs

1use std::collections::HashMap;
2
3use bitcoin::secp256k1::PublicKey;
4use bitcoin::{Sequence, Transaction};
5use frost_secp256k1_tr::round1::SigningCommitments;
6use spark_protos::common::SignatureIntent;
7use spark_protos::frost::{AggregateFrostRequest, FrostSigningJob};
8use spark_protos::spark::{
9    FinalizeNodeSignaturesRequest, NodeSignatures, RefreshTimelockRequest, SigningJob, TreeNode,
10};
11use tonic::{async_trait, Request};
12
13use crate::constants::spark::TIME_LOCK_INTERVAL;
14use crate::error::SparkSdkError;
15use crate::signer::default_signer::{
16    create_user_key_package, marshal_frost_commitments, marshal_frost_nonces,
17};
18use crate::signer::traits::SparkSigner;
19use crate::wallet::internal_handlers::traits::timelock::TimelockInternalHandlers;
20use crate::wallet::internal_handlers::utils::{
21    frost_commitment_to_proto_commitment, next_sequence, serialize_bitcoin_transaction,
22};
23use crate::wallet::utils::bitcoin::{bitcoin_tx_from_bytes, sighash_from_tx};
24use crate::wallet::utils::sequence::initial_sequence;
25use crate::SparkSdk;
26
27#[async_trait]
28impl<S: SparkSigner + Send + Sync> TimelockInternalHandlers for SparkSdk<S> {
29    async fn refresh_timelock_refund_tx(
30        &self,
31        leaf: &TreeNode,
32        signing_public_key: &PublicKey,
33    ) -> Result<(), SparkSdkError> {
34        // create and encode the new refund tx
35        let mut new_refund_tx = bitcoin_tx_from_bytes(&leaf.refund_tx)?;
36        let curr_sequence = new_refund_tx.input[0].sequence;
37        let next_sequence = next_sequence(curr_sequence.0);
38        new_refund_tx.input[0].sequence = Sequence(next_sequence);
39        let new_refund_tx_buf = serialize_bitcoin_transaction(&new_refund_tx)?;
40
41        // prepare the signing job
42        let commitment = self.signer.new_frost_signing_noncepair()?;
43        let commitment_bytes = commitment.serialize().unwrap();
44        let commitment_proto = frost_commitment_to_proto_commitment(&commitment)?;
45        let mut signing_jobs = Vec::new();
46        signing_jobs.push(SigningJob {
47            signing_public_key: signing_public_key.serialize().to_vec(),
48            raw_tx: new_refund_tx_buf,
49            signing_nonce_commitment: Some(commitment_proto),
50        });
51
52        // get the nonce, save the signing data
53        let nonce = self
54            .signer
55            .sensitive_expose_nonces_from_commitments(&commitment_bytes)?;
56        let mut signing_datas = Vec::new();
57        signing_datas.push(nonce);
58
59        // call Spark to refresh timelock on the leaf
60        let signing_jobs_len = signing_jobs.len();
61        let mut spark_client = self.config.spark_config.get_spark_connection(None).await?;
62        let owner_identity_public_key = self.config.spark_config.ssp_identity_public_key.clone();
63        let mut request = Request::new(RefreshTimelockRequest {
64            leaf_id: leaf.id.clone(),
65            owner_identity_public_key,
66            signing_jobs: signing_jobs.clone(),
67        });
68        self.add_authorization_header_to_request(&mut request, None);
69        let refresh_timelock_response = spark_client.refresh_timelock(request).await?.into_inner();
70
71        // validate response
72        if signing_jobs_len != refresh_timelock_response.signing_results.len() {
73            return Err(SparkSdkError::InvalidResponse(format!(
74                "number of signing jobs and signing results do not match: {} != {}",
75                signing_jobs_len,
76                refresh_timelock_response.signing_results.len()
77            )));
78        }
79
80        // prepare signing and aggregation jobs
81        let mut user_signing_jobs = Vec::new();
82        let mut job_to_aggregate_request_map = HashMap::new();
83        let mut job_to_node_id_map = HashMap::new();
84
85        let signing_results = refresh_timelock_response.signing_results;
86        for (i, signing_result) in signing_results.iter().enumerate() {
87            let signing_data = &signing_datas[i];
88            let signing_job = &signing_jobs[i];
89            let refund_tx = bitcoin_tx_from_bytes(&signing_job.raw_tx)?;
90            let node_tx = bitcoin_tx_from_bytes(&leaf.node_tx)?;
91
92            let refund_tx_sighash = sighash_from_tx(&refund_tx, 0, &node_tx.output[0])?;
93
94            let proto_signing_nonce = marshal_frost_nonces(&signing_data)?;
95            let proto_signing_commitment = marshal_frost_commitments(&signing_data.commitments())?;
96            let signing_secret_key = self
97                .signer
98                .sensitive_expose_secret_key_from_pubkey(&signing_job.signing_public_key, false)?;
99            let user_key_package = create_user_key_package(&signing_secret_key);
100
101            let signing_result_inner = signing_result.signing_result.as_ref().unwrap();
102            let operator_commitments = &signing_result_inner.signing_nonce_commitments;
103
104            let user_signing_job_id = uuid::Uuid::now_v7().to_string();
105
106            user_signing_jobs.push(FrostSigningJob {
107                job_id: user_signing_job_id.clone(),
108                message: refund_tx_sighash.to_vec(),
109                key_package: Some(user_key_package),
110                verifying_key: signing_result.verifying_key.clone(),
111                nonce: Some(proto_signing_nonce),
112                commitments: operator_commitments.clone(),
113                user_commitments: Some(proto_signing_commitment.clone()),
114                adaptor_public_key: Default::default(),
115            });
116
117            job_to_aggregate_request_map.insert(
118                user_signing_job_id.clone(),
119                AggregateFrostRequest {
120                    message: refund_tx_sighash.to_vec(),
121                    signature_shares: signing_result_inner.signature_shares.clone(),
122                    public_shares: signing_result_inner.public_keys.clone(),
123                    verifying_key: signing_result.verifying_key.clone(),
124                    commitments: operator_commitments.clone(),
125                    user_commitments: Some(proto_signing_commitment),
126                    user_public_key: signing_public_key.serialize().to_vec(),
127                    adaptor_public_key: Default::default(),
128                    user_signature_share: Default::default(),
129                },
130            );
131
132            job_to_node_id_map.insert(user_signing_job_id, leaf.id.clone());
133        }
134
135        // sign with user
136        let user_signatures = self.signer.sign_frost(user_signing_jobs)?;
137
138        // aggregate signatures
139        let mut node_signatures = Vec::new();
140        for (job_id, user_signature) in user_signatures.results.iter() {
141            let request = job_to_aggregate_request_map.get_mut(job_id).unwrap();
142            request.user_signature_share = user_signature.signature_share.clone();
143
144            let response = self.signer.aggregate_frost(request.clone())?;
145            node_signatures.push(NodeSignatures {
146                node_id: job_to_node_id_map[job_id].clone(),
147                refund_tx_signature: response.signature,
148                node_tx_signature: Default::default(),
149            });
150        }
151
152        // finalize node signatures for the flow
153        let mut request = Request::new(FinalizeNodeSignaturesRequest {
154            intent: SignatureIntent::Refresh.into(),
155            node_signatures,
156        });
157        self.add_authorization_header_to_request(&mut request, None);
158        spark_client.finalize_node_signatures(request).await?;
159
160        Ok(())
161    }
162
163    async fn refresh_timelock_nodes(
164        &self,
165        nodes: &Vec<TreeNode>,
166        parent_nodes: &Vec<TreeNode>,
167        signing_public_key: &PublicKey,
168    ) -> Result<(), SparkSdkError> {
169        if nodes.len() == 0 {
170            return Err(SparkSdkError::InvalidInput(
171                "no nodes to refresh timelock".to_string(),
172            ));
173        }
174
175        let mut signing_jobs = Vec::with_capacity(nodes.len() + 1);
176        let mut nonces = Vec::with_capacity(nodes.len() + 1);
177
178        for (i, node) in nodes.iter().enumerate() {
179            let mut new_node_tx = bitcoin_tx_from_bytes(&node.node_tx)?;
180            if i == 0 {
181                let curr_sequence = new_node_tx.input[0].sequence;
182                new_node_tx.input[0].sequence = Sequence(next_sequence(curr_sequence.0));
183            } else {
184                new_node_tx.input[0].sequence = Sequence(TIME_LOCK_INTERVAL);
185            }
186
187            let (signing_job, signing_commitment) =
188                self.signing_job_from_tx(&new_node_tx, signing_public_key)?;
189
190            signing_jobs.push(signing_job);
191
192            let commitment_bytes = signing_commitment.serialize().unwrap();
193            let nonce = self
194                .signer
195                .sensitive_expose_nonces_from_commitments(&commitment_bytes)?;
196            nonces.push(nonce);
197        }
198
199        // add one more job for the refund tx
200        let leaf = &nodes[nodes.len() - 1];
201        let mut new_refund_tx = bitcoin_tx_from_bytes(&leaf.refund_tx)?;
202        new_refund_tx.input[0].sequence = initial_sequence(); // set the sequence to the initial sequence
203        let (signing_job, signing_commitment) =
204            self.signing_job_from_tx(&new_refund_tx, signing_public_key)?;
205        signing_jobs.push(signing_job);
206        let commitment_bytes = signing_commitment.serialize().unwrap();
207        let refund_nonce = self
208            .signer
209            .sensitive_expose_nonces_from_commitments(&commitment_bytes)?;
210        nonces.push(refund_nonce);
211
212        // call Spark to refresh timelock on the nodes
213        let mut spark_client = self.config.spark_config.get_spark_connection(None).await?;
214        let owner_identity_public_key = self.config.spark_config.ssp_identity_public_key.clone();
215        let mut request = Request::new(RefreshTimelockRequest {
216            leaf_id: leaf.id.clone(),
217            owner_identity_public_key,
218            signing_jobs: signing_jobs.clone(),
219        });
220        self.add_authorization_header_to_request(&mut request, None);
221
222        let refresh_timelock_response = spark_client.refresh_timelock(request).await?.into_inner();
223
224        if signing_jobs.len() != refresh_timelock_response.signing_results.len() {
225            return Err(SparkSdkError::InvalidInput(format!(
226                "number of signing jobs and signing results do not match: {} != {}",
227                signing_jobs.len(),
228                refresh_timelock_response.signing_results.len()
229            )));
230        }
231
232        // sign and aggregate
233        let mut user_signing_jobs = Vec::new();
234        let mut job_to_aggregate_request_map = HashMap::new();
235        let mut job_to_node_id_map = HashMap::new();
236        let mut job_to_refund_map = HashMap::new();
237
238        for (i, signing_result) in refresh_timelock_response.signing_results.iter().enumerate() {
239            let nonce = nonces[i].clone();
240            let signing_job = &signing_jobs[i];
241            let raw_tx = bitcoin_tx_from_bytes(&signing_job.raw_tx)?;
242
243            // get parent node for txout for sighash
244            let (parent_node, node, refund, vout) = if i == nodes.len() {
245                // Refund tx
246                let node = &nodes[i - 1];
247                (node, node, true, 0)
248            } else {
249                let node = &nodes[i];
250                let parent_node = &parent_nodes[i];
251                (parent_node, node, false, node.vout as usize)
252            };
253            let parent_tx = bitcoin_tx_from_bytes(&parent_node.node_tx)?;
254            let txout = parent_tx.output[vout].clone();
255
256            let raw_tx_sighash = sighash_from_tx(&raw_tx, 0, &txout)?;
257            let proto_signing_nonce = marshal_frost_nonces(&nonce)?;
258            let proto_signing_commitment = marshal_frost_commitments(&nonce.commitments())?;
259            let signing_secret_key = self
260                .signer
261                .sensitive_expose_secret_key_from_pubkey(&signing_job.signing_public_key, false)?;
262            let user_key_package = create_user_key_package(&signing_secret_key);
263
264            let signing_result_inner = signing_result.signing_result.as_ref().unwrap();
265            let operator_commitments = &signing_result_inner.signing_nonce_commitments;
266
267            let user_signing_job_id = uuid::Uuid::now_v7().to_string();
268            user_signing_jobs.push(FrostSigningJob {
269                job_id: user_signing_job_id.clone(),
270                message: raw_tx_sighash.to_vec(),
271                key_package: Some(user_key_package),
272                verifying_key: signing_result.verifying_key.clone(),
273                nonce: Some(proto_signing_nonce),
274                commitments: operator_commitments.clone(),
275                user_commitments: Some(proto_signing_commitment.clone()),
276                adaptor_public_key: Default::default(),
277            });
278
279            job_to_aggregate_request_map.insert(
280                user_signing_job_id.clone(),
281                AggregateFrostRequest {
282                    message: raw_tx_sighash.to_vec(),
283                    signature_shares: signing_result_inner.signature_shares.clone(),
284                    public_shares: signing_result_inner.public_keys.clone(),
285                    verifying_key: signing_result.verifying_key.clone(),
286                    commitments: operator_commitments.clone(),
287                    user_commitments: Some(proto_signing_commitment),
288                    user_public_key: signing_public_key.serialize().to_vec(),
289                    adaptor_public_key: Default::default(),
290                    user_signature_share: Default::default(),
291                },
292            );
293
294            job_to_node_id_map.insert(user_signing_job_id.clone(), node.id.clone());
295            job_to_refund_map.insert(user_signing_job_id, refund);
296        }
297
298        let user_signatures = self.signer.sign_frost(user_signing_jobs)?;
299        let mut node_signatures = Vec::new();
300
301        for (job_id, user_signature) in user_signatures.results.iter() {
302            let request = job_to_aggregate_request_map.get_mut(job_id).unwrap();
303            request.user_signature_share = user_signature.signature_share.clone();
304
305            let response = self.signer.aggregate_frost(request.clone())?;
306
307            if job_to_refund_map[job_id] {
308                node_signatures.push(NodeSignatures {
309                    node_id: job_to_node_id_map[job_id].clone(),
310                    refund_tx_signature: response.signature,
311                    node_tx_signature: Default::default(),
312                });
313            } else {
314                node_signatures.push(NodeSignatures {
315                    node_id: job_to_node_id_map[job_id].clone(),
316                    node_tx_signature: response.signature,
317                    refund_tx_signature: Default::default(),
318                });
319            }
320        }
321
322        // finalize node signatures for the flow
323        let mut request = Request::new(FinalizeNodeSignaturesRequest {
324            intent: SignatureIntent::Refresh.into(),
325            node_signatures,
326        });
327        self.add_authorization_header_to_request(&mut request, None);
328        spark_client.finalize_node_signatures(request).await?;
329
330        Ok(())
331    }
332
333    fn signing_job_from_tx(
334        &self,
335        new_tx: &Transaction,
336        signing_public_key: &PublicKey,
337    ) -> Result<(SigningJob, SigningCommitments), SparkSdkError> {
338        let tx_bytes = serialize_bitcoin_transaction(new_tx)?;
339
340        let signing_commitment = self.signer.new_frost_signing_noncepair()?;
341        let proto_signing_commitment = marshal_frost_commitments(&signing_commitment)?;
342
343        let signing_job = SigningJob {
344            signing_public_key: signing_public_key.serialize().to_vec(),
345            raw_tx: tx_bytes,
346            signing_nonce_commitment: Some(proto_signing_commitment),
347        };
348
349        Ok((signing_job, signing_commitment))
350    }
351}
352
353// func signingJobFromTx(
354// 	newTx *wire.MsgTx,
355// 	signingPrivKey *secp256k1.PrivateKey,
356// ) (*pb.SigningJob, *objects.SigningNonce, error) {
357// 	var newTxBuf bytes.Buffer
358// 	err := newTx.Serialize(&newTxBuf)
359// 	if err != nil {
360// 		return nil, nil, fmt.Errorf("failed to serialize new refund tx: %v", err)
361// 	}
362
363// 	nonce, err := objects.RandomSigningNonce()
364// 	if err != nil {
365// 		return nil, nil, fmt.Errorf("failed to generate nonce: %v", err)
366// 	}
367// 	nonceCommitmentProto, err := nonce.SigningCommitment().MarshalProto()
368// 	if err != nil {
369// 		return nil, nil, fmt.Errorf("failed to marshal nonce commitment: %v", err)
370// 	}
371
372// 	signingJob := &pb.SigningJob{
373// 		SigningPublicKey:       signingPrivKey.PubKey().SerializeCompressed(),
374// 		RawTx:                  newTxBuf.Bytes(),
375// 		SigningNonceCommitment: nonceCommitmentProto,
376// 	}
377// 	return signingJob, nonce, nil
378// }