// spark_sdk/wallet/internal_handlers/token_transaction.rs
1// // std
2// use std::collections::HashMap;
3// use std::io::Write;
4// use std::time::SystemTime;
5// use std::time::UNIX_EPOCH;
6// use std::vec;
7
8// // crate
9// use crate::error::SparkSdkError;
10// use crate::signer::traits::ecdsa::SparkSignerEcdsa;
11// use crate::signer::traits::secp256k1::KeygenMethod;
12// use crate::signer::traits::secp256k1::SparkSignerSecp256k1;
13// use crate::wallet::internal_handlers::utils::parsers::parse_public_key;
14// use crate::wallet::leaf_manager::LeafNode;
15// use crate::SparkSdk;
16
17// // external spark crates
18// // use spark_cryptography::secret_sharing::shamir_new::recover_secret_new;
19// // use spark_cryptography::secret_sharing::shamir_new::FeldmanShare;
20// use spark_protos::spark::token_transaction::TokenInput;
21// use spark_protos::spark::FinalizeTokenTransactionRequest;
22// use spark_protos::spark::MintInput;
23// use spark_protos::spark::OperatorSpecificTokenTransactionSignablePayload;
24// use spark_protos::spark::OperatorSpecificTokenTransactionSignature;
25// use spark_protos::spark::SignTokenTransactionRequest;
26// use spark_protos::spark::StartTokenTransactionRequest;
27// use spark_protos::spark::TokenLeafOutput;
28// use spark_protos::spark::TokenLeafToSpend;
29// use spark_protos::spark::TokenTransaction;
30// use spark_protos::spark::TokenTransactionSignatures;
31// use spark_protos::spark::TransferInput;
32
33// // external crates
34// use k256::elliptic_curve::scalar::FromUintUnchecked;
35// use sha2::Digest;
36
37// struct KeyshareWithOperatorIndex {
38// keyshare: Vec<u8>,
39// index: u32,
40// }
41
42// /// Converts two u64 values into a 16-byte array representing a uint128 in big-endian format
43// ///
44// /// # Arguments
45// /// * `high` - The high 64 bits
46// /// * `low` - The low 64 bits
47// ///
48// /// # Returns
49// /// A Vec<u8> containing the 16-byte big-endian representation
50// fn int64_to_uint128_bytes(high: u64, low: u64) -> Vec<u8> {
51// let mut bytes = Vec::with_capacity(16);
52// bytes.extend_from_slice(&high.to_be_bytes());
53// bytes.extend_from_slice(&low.to_be_bytes());
54// bytes
55// }
56
57// impl SparkSdk {
58// pub async fn test_flow(&self) -> Result<(), SparkSdkError> {
59// Ok(())
60// }
61
62// pub async fn mint_token(
63// &self,
64// leaf_values: Vec<u64>,
65// mint_to_public_keys: Option<Vec<Vec<u8>>>,
66// ) -> Result<Vec<String>, SparkSdkError> {
67// // collect operator public keys
68// let mut spark_operator_identity_public_keys = vec![];
69// for operator in self.config.spark_config.spark_operators.iter() {
70// spark_operator_identity_public_keys.push(operator.identity_public_key.clone());
71// }
72
73// // token public key is the issuer public key
74// // this assumes that the user has not issued a token before, since each keypair can only generate one token
75// let token_identity_public_key_bytes = self.get_identity_public_key().to_vec();
76
77// let owner_public_keys = match mint_to_public_keys {
78// Some(mint_to_public_keys) => mint_to_public_keys,
79// None => {
80// let mut leaf_owner_public_keys = vec![];
81// for _ in 0..leaf_values.len() {
82// let new_key = self.signer.new_secp256k1_keypair(KeygenMethod::Random)?;
83// println!("New key: {:?}", hex::encode(new_key.to_vec()));
84// leaf_owner_public_keys.push(new_key.to_vec());
85// }
86// leaf_owner_public_keys
87// }
88// };
89
90// // generate token public keys
91// let mut output_leaves = vec![];
92// for (i, leaf_value) in leaf_values.iter().enumerate() {
93// // Generate a new keypair for the revocation key
94// let revocation_keypair = self.signer.new_secp256k1_keypair(KeygenMethod::Random)?;
95
96// output_leaves.push(TokenLeafOutput {
97// id: None,
98// owner_public_key: owner_public_keys[i].clone(),
99// withdraw_bond_sats: None,
100// withdraw_relative_block_locktime: None,
101// token_public_key: token_identity_public_key_bytes.clone(),
102// token_amount: int64_to_uint128_bytes(0, *leaf_value),
103// revocation_public_key: Some(revocation_keypair.to_vec()),
104// });
105// }
106
107// let issue_token_transaction = TokenTransaction {
108// token_input: Some(TokenInput::MintInput(MintInput {
109// issuer_public_key: token_identity_public_key_bytes.clone(),
110// issuer_provided_timestamp: SystemTime::now()
111// .duration_since(UNIX_EPOCH)
112// .unwrap()
113// .as_nanos() as u64,
114// })),
115// output_leaves: output_leaves.clone(),
116// spark_operator_identity_public_keys: spark_operator_identity_public_keys.clone(),
117// };
118
119// let final_issue_token_transaction = self
120// .broadcast_token_transaction(
121// &issue_token_transaction,
122// vec![token_identity_public_key_bytes.clone()],
123// vec![],
124// )
125// .await?;
126
127// let final_issue_token_transaction_hash =
128// hash_token_transaction(&final_issue_token_transaction, false);
129
130// let mut leaf_ids = vec![];
131// for (i, leaf) in final_issue_token_transaction
132// .output_leaves
133// .iter()
134// .enumerate()
135// {
136// let leaf_id = &leaf.id.clone().unwrap();
137// leaf_ids.push(leaf_id.clone());
138// self.leaf_manager
139// .insert_leaves_in_batch(vec![LeafNode::new(
140// leaf_id.clone().to_string(),
141// "TREE_ID_NA".to_string(),
142// leaf_values[i],
143// None,
144// i as u32,
145// vec![],
146// owner_public_keys[i].clone(),
147// vec![],
148// vec![],
149// Some(token_identity_public_key_bytes.clone()),
150// leaf.revocation_public_key.clone().unwrap(),
151// final_issue_token_transaction_hash.clone(),
152// )])?;
153// }
154
155// Ok(leaf_ids)
156// }
157
158// pub async fn transfer_token(
159// &self,
160// leaf_ids: Vec<String>,
161// send_to_pubkey: Vec<u8>,
162// ) -> Result<(), SparkSdkError> {
163// let mut leaves = vec![];
164// for leaf_id in leaf_ids {
165// let leaf = self.leaf_manager.get_node(&leaf_id)?;
166// leaves.push(leaf);
167// }
168
169// // check token public key is the same for each
170// let token_public_key = leaves[0].token_public_key.clone();
171// for leaf in &leaves {
172// if leaf.token_public_key != token_public_key {
173// return Err(SparkSdkError::InvalidTokenTransaction(
174// "Token public key is not the same for all leaves".to_string(),
175// ));
176// }
177// }
178// // get spark operator identity public keys
179// let mut spark_operator_identity_public_keys = vec![];
180// for operator in self.config.spark_config.spark_operators.iter() {
181// spark_operator_identity_public_keys.push(operator.identity_public_key.clone());
182// }
183
184// // get leaves to spend
185// let mut leaves_to_spend = vec![];
186// for leaf in leaves.iter() {
187// leaves_to_spend.push(TokenLeafToSpend {
188// prev_token_transaction_hash: leaf.token_transaction_hash.clone(),
189// prev_token_transaction_leaf_vout: leaf.vout,
190// });
191// }
192
193// let send_amount = leaves.iter().map(|leaf| leaf.value).sum();
194
195// // Generate a new revocation keypair for the output leaf
196// let revocation_keypair = self.signer.new_secp256k1_keypair(KeygenMethod::Random)?;
197
198// let token_output = TokenLeafOutput {
199// id: None,
200// owner_public_key: send_to_pubkey.clone(),
201// withdraw_bond_sats: None,
202// withdraw_relative_block_locktime: None,
203// token_public_key: token_public_key.clone(),
204// token_amount: int64_to_uint128_bytes(0, send_amount),
205// revocation_public_key: Some(revocation_keypair.to_vec()),
206// };
207// let output_leaves = vec![token_output];
208
209// // create transfer token transaction
210// let transfer_token_transaction = TokenTransaction {
211// token_input: Some(TokenInput::TransferInput(TransferInput { leaves_to_spend })),
212// output_leaves,
213// spark_operator_identity_public_keys,
214// };
215
216// let final_transfer_token_transaction = self
217// .broadcast_token_transaction(
218// &transfer_token_transaction,
219// leaves
220// .iter()
221// .map(|leaf| leaf.signing_public_key.clone())
222// .collect(),
223// leaves
224// .iter()
225// .map(|leaf| leaf.revocation_public_key.clone())
226// .collect(),
227// )
228// .await?;
229
230// let final_transfer_token_transaction_hash =
231// hash_token_transaction(&final_transfer_token_transaction, false);
232
233// println!(
234// "final transfer transaction hash: {:?}",
235// final_transfer_token_transaction_hash
236// );
237// Ok(())
238// }
239
240// pub async fn broadcast_token_transaction(
241// &self,
242// token_transaction: &TokenTransaction,
243// leaf_to_spend_public_keys: Vec<Vec<u8>>,
244// leaf_to_spend_revocation_public_keys: Vec<Vec<u8>>,
245// ) -> Result<TokenTransaction, SparkSdkError> {
246// // hash token transaction
247// let partial_token_transaction_hash = hash_token_transaction(&token_transaction, true);
248
249// // collect owner signatures
250// let mut owner_signatures = vec![];
251// if get_token_transaction_issue_input(token_transaction).is_some() {
252// // For issuance transactions, one signature for the input issuer_public_key
253// let signature = self
254// .signer
255// .sign_message_ecdsa_with_identity_key(&partial_token_transaction_hash, false)?;
256
257// let secp = bitcoin::secp256k1::Secp256k1::new();
258// let identity_public_key = parse_public_key(&self.get_identity_public_key().to_vec())?;
259
260// let message =
261// bitcoin::secp256k1::Message::from_digest_slice(&partial_token_transaction_hash)
262// .map_err(|e| {
263// SparkSdkError::InvalidTokenTransaction(format!("Invalid message: {}", e))
264// })?;
265// let sig = bitcoin::secp256k1::ecdsa::Signature::from_der(&signature.clone()).map_err(
266// |e| SparkSdkError::InvalidTokenTransaction(format!("Invalid signature: {}", e)),
267// )?;
268
269// secp.verify_ecdsa(&message, &sig, &identity_public_key)?;
270
271// owner_signatures.push(signature);
272// } else if get_token_transaction_transfer_input(token_transaction).is_some() {
273// // For transfer transactions, one signature for each leaf
274// for public_key in leaf_to_spend_public_keys.iter() {
275// let signature = self.signer.sign_message_ecdsa_with_key(
276// &partial_token_transaction_hash,
277// public_key,
278// false,
279// )?;
280// owner_signatures.push(signature.clone());
281// }
282// }
283
284// println!("passed 1");
285
286// // start the token transaction with Spark
287// let mut coordinator_client = self.config.spark_config.get_spark_connection(None).await?;
288// let mut request = tonic::Request::new(StartTokenTransactionRequest {
289// identity_public_key: self.get_identity_public_key().to_vec(),
290// partial_token_transaction: Some(token_transaction.clone()),
291// token_transaction_signatures: Some(TokenTransactionSignatures { owner_signatures }),
292// });
293// self.add_authorization_header_to_request(&mut request, None);
294// let start_response = coordinator_client
295// .start_token_transaction(request)
296// .await?
297// .into_inner();
298
299// println!("passed 2");
300
301// // validate that the keyshare config returned by the coordinator SO matches the full signing operator list.
302// // TODO: When we support threshold signing allow the keyshare identifiers to be a subset of the signing operators.
303// let keyshare_info = start_response.keyshare_info.unwrap();
304// if keyshare_info.owner_identifiers.len() != self.config.spark_config.spark_operators.len() {
305// return Err(SparkSdkError::InvalidKeyshareConfig(
306// "Keyshare config does not match the full signing operator list".to_string(),
307// ));
308// }
309
310// for i in 0..keyshare_info.owner_identifiers.len() {
311// if self.config.spark_config.spark_operators.get(i).is_none() {
312// return Err(SparkSdkError::InvalidKeyshareConfig(
313// "Keyshare config does not match the full signing operator list".to_string(),
314// ));
315// }
316// }
317
318// println!("passed 3");
319
320// // validate that the operator signatures match the provided operator keys
321// let final_token_transaction = start_response.final_token_transaction.clone().unwrap();
322// let final_token_transaction_hash = hash_token_transaction(&final_token_transaction, false);
323
324// // get operator signatures
325// let mut operator_specific_signatures = vec![];
326// let payload = OperatorSpecificTokenTransactionSignablePayload {
327// final_token_transaction_hash: final_token_transaction_hash.clone(),
328// operator_identity_public_key: self.get_identity_public_key().to_vec(),
329// };
330
331// // hash the payload
332// let payload_hash = hash_operator_specific_token_transaction_signable_payload(&payload);
333
334// println!("passed 4");
335
336// // for issue transactions, create a single operator-specific signature using the issuer private key
337// if get_token_transaction_issue_input(token_transaction).is_some() {
338// let signature = self
339// .signer
340// .sign_message_ecdsa_with_identity_key(&payload_hash, false)?;
341
342// operator_specific_signatures.push(OperatorSpecificTokenTransactionSignature {
343// owner_public_key: self.get_identity_public_key().to_vec(),
344// owner_signature: signature,
345// payload: Some(payload.clone()),
346// });
347// }
348
349// println!("passed 5");
350
351// // for transfer transactions, create an operator-specific signature for each leaf.
352// if let Some(transfer_input) = get_token_transaction_transfer_input(token_transaction) {
353// for (i, _) in transfer_input.leaves_to_spend.iter().enumerate() {
354// let signature = self.signer.sign_message_ecdsa_with_key(
355// &payload_hash,
356// &leaf_to_spend_public_keys[i],
357// false,
358// )?;
359
360// operator_specific_signatures.push(OperatorSpecificTokenTransactionSignature {
361// owner_public_key: leaf_to_spend_public_keys[i].clone(),
362// owner_signature: signature,
363// payload: Some(payload.clone()),
364// });
365// }
366// }
367
368// println!("passed 6");
369
370// // create a 2D vector to store keyshares and indices for each leaf from each operator.
// 371// // this will be unfilled if it's an issuance transaction.
372// let mut leaf_revocation_keyshares = vec![];
373// for (i, operator) in self.config.spark_config.spark_operators.iter().enumerate() {
374// let mut operator_client = if i == 0 {
375// // avoid recreating an arc reference to the coordinator client
376// coordinator_client.clone()
377// } else {
378// self.config
379// .spark_config
380// .get_spark_connection(Some(operator.id))
381// .await?
382// };
383
384// // send the sign request to every operator
385// let mut request = tonic::Request::new(SignTokenTransactionRequest {
386// final_token_transaction: start_response.final_token_transaction.clone(),
387// operator_specific_signatures: operator_specific_signatures.clone(),
388// identity_public_key: self.get_identity_public_key().to_vec(),
389// });
390// self.add_authorization_header_to_request(&mut request, None);
391// let response = operator_client.sign_token_transaction(request).await?;
392// let response = response.into_inner();
393
394// let operator_signature = response.spark_operator_signature;
395// let token_transaction_revocation_keyshares =
396// response.token_transaction_revocation_keyshares;
397// validate_ownership_signature(
398// &operator_signature,
399// &final_token_transaction_hash,
400// &operator.identity_public_key,
401// )?;
402
403// // store each leaf's keyshare and operator index
404// for (leaf_index, keyshare) in token_transaction_revocation_keyshares
405// .into_iter()
406// .enumerate()
407// {
408// // Extend leaf_revocation_keyshares vec if needed
409// while leaf_revocation_keyshares.len() <= leaf_index {
410// leaf_revocation_keyshares.push(vec![]);
411// }
412
413// leaf_revocation_keyshares[leaf_index].push(KeyshareWithOperatorIndex {
414// keyshare,
415// index: i as u32,
416// });
417// }
418// }
419
420// println!("passed 7");
421
422// // finalization only required for transfer transactions.
423// if get_token_transaction_transfer_input(token_transaction).is_some() {
424// let mut leaf_recovered_secrets = vec![];
425// for (i, leaf_keyshares) in leaf_revocation_keyshares.iter().enumerate() {
426// if leaf_keyshares.len() < keyshare_info.threshold as usize {
427// return Err(SparkSdkError::InvalidKeyshareConfig(
428// "Not enough keyshares to recover leaf secret".to_string(),
429// ));
430// }
431
432// // check for duplicate operator indices
433// let mut seen_indices = HashMap::new();
434// for keyshare in leaf_keyshares {
435// if seen_indices.contains_key(&keyshare.index) {
436// return Err(SparkSdkError::InvalidKeyshareConfig(
437// "Duplicate keyshare index found".to_string(),
438// ));
439// }
440// seen_indices.insert(keyshare.index, true);
441// }
442
443// // Use the keyshares to recover the secret
444// let mut shares = vec![];
445// for (_, keyshare_operator_index) in leaf_keyshares.iter().enumerate() {
446// println!(
447// "Keyshare from operator {}: {}",
448// keyshare_operator_index.index,
449// hex::encode(&keyshare_operator_index.keyshare)
450// );
451
452// // Convert the keyshare bytes to a scalar using from_uint_unchecked
453// let secret_share = FeldmanShare {
454// index: keyshare_operator_index.index,
455// value: k256::Scalar::from_uint_unchecked(
456// k256::elliptic_curve::bigint::U256::from_be_slice(
457// &keyshare_operator_index.keyshare,
458// ),
459// ),
460// threshold: keyshare_info.threshold as usize,
461// };
462// shares.push(secret_share);
463// }
464
465// // Sort shares by index to ensure consistent order
466// shares.sort_by_key(|share| share.index);
467
468// // Take only threshold number of shares
469// shares.truncate(keyshare_info.threshold as usize);
470
471// // Recover the secret using k256-based implementation
472// let recovered_key = recover_secret_new(&shares)
473// .map_err(|e| SparkSdkError::InvalidKeyshareConfig(e.to_string()))?;
474// let recovered_key_bytes = recovered_key.to_bytes().to_vec();
475
476// // Convert recovered key to a secp256k1 key pair
477// let secp = bitcoin::secp256k1::Secp256k1::new();
478// let secret_key = bitcoin::secp256k1::SecretKey::from_slice(&recovered_key_bytes)
479// .map_err(|e| {
480// SparkSdkError::InvalidKeyshareConfig(format!("Invalid secret key: {}", e))
481// })?;
482// let public_key = bitcoin::secp256k1::PublicKey::from_secret_key(&secp, &secret_key);
483
484// // Get the original key
485// let original_key = bitcoin::secp256k1::PublicKey::from_slice(
486// &leaf_to_spend_revocation_public_keys[i],
487// )
488// .map_err(|e| {
489// SparkSdkError::InvalidKeyshareConfig(format!("Invalid public key: {}", e))
490// })?;
491
492// // Compare the x-coordinates of the public keys
493// if public_key.serialize()[1..33] != original_key.serialize()[1..33] {
494// return Err(SparkSdkError::InvalidKeyshareConfig(format!(
495// "Recovered secret for leaf {} does not match leaf public key. Recovered: {}, Expected: {}",
496// i,
497// hex::encode(public_key.serialize()),
498// hex::encode(original_key.serialize())
499// )));
500// }
501
502// // add to vec
503// leaf_recovered_secrets.push(recovered_key_bytes);
504// }
505
506// println!("passed 8");
507
508// // TODO: validate revocation keys (utils.ValidateRevocationKeys)
509
510// // finalize the token transaction with each operator
511// for operator in self.config.spark_config.spark_operators.iter() {
512// let mut operator_client = self
513// .config
514// .spark_config
515// .get_spark_connection(Some(operator.id))
516// .await?;
517// let mut request = tonic::Request::new(FinalizeTokenTransactionRequest {
518// final_token_transaction: start_response.final_token_transaction.clone(),
519// leaf_to_spend_revocation_keys: leaf_recovered_secrets.clone(),
520// identity_public_key: self.get_identity_public_key().to_vec(),
521// });
522// self.add_authorization_header_to_request(&mut request, None);
523// operator_client.finalize_token_transaction(request).await?;
524// }
525// }
526
527// Ok(final_token_transaction)
528// }
529// }
530
531// fn get_token_transaction_issue_input(token_transaction: &TokenTransaction) -> Option<MintInput> {
532// if let Some(token_input) = &token_transaction.token_input {
533// if let TokenInput::MintInput(issue_input) = token_input {
534// return Some(issue_input.clone());
535// }
536// }
537// None
538// }
539
540// fn get_token_transaction_transfer_input(
541// token_transaction: &TokenTransaction,
542// ) -> Option<TransferInput> {
543// if let Some(token_input) = &token_transaction.token_input {
544// if let TokenInput::TransferInput(transfer_input) = token_input {
545// return Some(transfer_input.clone());
546// }
547// }
548// None
549// }
550
551// /// Generates a SHA256 hash of the TokenTransaction by:
552// /// 1. Taking SHA256 of each field individually
553// /// 2. Concatenating all field hashes in order
554// /// 3. Taking SHA256 of the concatenated hashes
555// ///
556// /// If `partial_hash` is true, generates a partial hash even if the provided transaction is final.
557// ///
558// /// # Arguments
559// ///
560// /// * `token_transaction` - The TokenTransaction to hash
561// /// * `partial_hash` - Whether to generate a partial hash even for final transactions
562// ///
563// /// # Returns
564// ///
565// /// * `Vec<u8>` - The resulting hash as a byte vector
566// fn hash_token_transaction(token_transaction: &TokenTransaction, partial_hash: bool) -> Vec<u8> {
567// let mut h = sha2::Sha256::new();
568// let mut all_hashes = vec![];
569
570// // Hash input leaves if transfer input is present.
571// if let Some(transfer_input) = get_token_transaction_transfer_input(token_transaction) {
572// for leaf in transfer_input.leaves_to_spend {
573// if !leaf.prev_token_transaction_hash.is_empty() {
574// h.write(&leaf.prev_token_transaction_hash).unwrap();
575// }
576// let mut buf = [0u8; 4];
577// buf.copy_from_slice(&leaf.prev_token_transaction_leaf_vout.to_be_bytes());
578// h.write(&buf).unwrap();
579
580// // Finalize & collect this chunk's hash.
581// let hash = h.finalize_reset();
582// all_hashes.push(hash);
583// }
584// }
585// // Otherwise, hash mint input if an issue input is present.
586// else if let Some(mint_input) = get_token_transaction_issue_input(token_transaction) {
587// if !mint_input.issuer_public_key.is_empty() {
588// h.write(&mint_input.issuer_public_key).unwrap();
589// let hash = h.finalize_reset();
590// all_hashes.push(hash);
591// }
592// }
593
594// // Hash output leaves
595// for leaf in &token_transaction.output_leaves {
596// h.reset();
597
598// // Only hash the leaf ID if full (non-partial) hash is desired
599// if let Some(id) = &leaf.id {
600// if !partial_hash {
601// h.write(id.as_bytes()).unwrap();
602// }
603// }
604
605// // Owner public key
606// if !leaf.owner_public_key.is_empty() {
607// h.write(&leaf.owner_public_key).unwrap();
608// }
609
610// // If this is a full hash, also hash the revocation public key
611// if let Some(ref rev_key) = leaf.revocation_public_key {
612// if !partial_hash {
613// h.write(rev_key).unwrap();
614// }
615// }
616
617// // Hash withdrawal bond and locktime for a full hash
618// if !partial_hash {
619// if let Some(bond) = leaf.withdraw_bond_sats {
620// let mut withdrawal_bond_bytes = [0u8; 8];
621// withdrawal_bond_bytes.copy_from_slice(&bond.to_be_bytes());
622// h.write(&withdrawal_bond_bytes).unwrap();
623// }
624
625// if let Some(locktime) = leaf.withdraw_relative_block_locktime {
626// let mut withdrawal_locktime_bytes = [0u8; 8];
627// withdrawal_locktime_bytes.copy_from_slice(&locktime.to_be_bytes());
628// h.write(&withdrawal_locktime_bytes).unwrap();
629// }
630// }
631
632// // Token public key
633// if !leaf.token_public_key.is_empty() {
634// h.write(&leaf.token_public_key).unwrap();
635// }
636
637// // Token amount
638// if !leaf.token_amount.is_empty() {
639// h.write(&leaf.token_amount).unwrap();
640// }
641
642// // Finalize & collect this leaf's hash.
643// let hash = h.finalize_reset();
644// all_hashes.push(hash);
645// }
646
647// // Sort and then hash Spark operator public keys.
648// let mut sorted_pubkeys = token_transaction
649// .spark_operator_identity_public_keys
650// .clone();
651// sorted_pubkeys.sort();
652// for public_key in sorted_pubkeys {
653// h.reset();
654// if !public_key.is_empty() {
655// h.write(&public_key).unwrap();
656// }
657// let hash = h.finalize_reset();
658// all_hashes.push(hash);
659// }
660
661// // Final pass: hash all collected "sub-hashes" together
662// h.reset();
663// for hash in all_hashes {
664// h.write(&hash).unwrap();
665// }
666// let final_hash = h.finalize_reset();
667
668// final_hash.to_vec()
669// }
670
671// /// Hashes the operator-specific token transaction signable payload.
672// ///
673// /// This function takes a payload containing a final token transaction hash and an operator identity public key,
674// /// and produces a single hash by:
675// /// 1. Hashing the final token transaction hash if present
676// /// 2. Hashing the operator identity public key if present
677// /// 3. Concatenating and hashing all the intermediate hashes together
678// ///
679// /// # Arguments
680// /// * `payload` - The operator-specific token transaction payload to hash
681// ///
682// /// # Returns
683// ///
684// /// * `Vec<u8>` - The final hash as a byte vector
685// fn hash_operator_specific_token_transaction_signable_payload(
686// payload: &OperatorSpecificTokenTransactionSignablePayload,
687// ) -> Vec<u8> {
688// let mut h = sha2::Sha256::new();
689// let mut all_hashes = vec![];
690
691// // hash the final token transaction hash
692// if payload.final_token_transaction_hash.len() > 0 {
693// h.write(&payload.final_token_transaction_hash).unwrap();
694// let hash = h.finalize_reset();
695// all_hashes.push(hash);
696// }
697
698// // hash the operator identity public key
699// if payload.operator_identity_public_key.len() > 0 {
700// h.write(&payload.operator_identity_public_key).unwrap();
701// let hash = h.finalize_reset();
702// all_hashes.push(hash);
703// }
704
705// // final hash of all concatenated hashes
706// h.reset();
707// for hash in all_hashes {
708// h.write(&hash).unwrap();
709// }
710// let final_hash = h.finalize_reset();
711
712// final_hash.to_vec()
713// }
714
715// fn validate_ownership_signature(
716// ownership_signature: &Vec<u8>,
717// partial_token_transaction_hash: &Vec<u8>,
718// owner_public_key_bytes: &Vec<u8>,
719// ) -> Result<(), SparkSdkError> {
720// if ownership_signature.len() == 0 {
721// return Err(SparkSdkError::InvalidTokenTransaction(
722// "Ownership signature is empty".to_string(),
723// ));
724// };
725
726// if partial_token_transaction_hash.len() == 0 {
727// return Err(SparkSdkError::InvalidTokenTransaction(
728// "Partial token transaction hash is empty".to_string(),
729// ));
730// };
731
732// if owner_public_key_bytes.len() == 0 {
733// return Err(SparkSdkError::InvalidTokenTransaction(
734// "Owner public key is empty".to_string(),
735// ));
736// };
737
738// let sig = bitcoin::secp256k1::ecdsa::Signature::from_der(&ownership_signature)
739// .map_err(|e| SparkSdkError::InvalidTokenTransaction(format!("Invalid signature: {}", e)))?;
740
741// let owner_public_key = parse_public_key(&owner_public_key_bytes)?;
742
743// let secp = bitcoin::secp256k1::Secp256k1::new();
744// let message = bitcoin::secp256k1::Message::from_digest_slice(&partial_token_transaction_hash)
745// .map_err(|e| {
746// SparkSdkError::InvalidTokenTransaction(format!("Invalid message: {}", e))
747// })?;
748
749// secp.verify_ecdsa(&message, &sig, &owner_public_key)?;
750
751// Ok(())
752// }