TF-M Reference Manual  1.2.0
TrustedFirmware-M
spm_func.c
1 /*
2  * Copyright (c) 2017-2020, Arm Limited. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  */
7 
8 #include <stdint.h>
9 #include <stdbool.h>
10 #include <arm_cmse.h>
11 #include "tfm_nspm.h"
12 #include "tfm_api.h"
13 #include "tfm_arch.h"
14 #include "tfm_irq_list.h"
15 #include "psa/service.h"
16 #include "tfm_core_mem_check.h"
17 #include "tfm_peripherals_def.h"
18 #include "tfm_secure_api.h"
19 #include "tfm_spm_hal.h"
20 #include "tfm_core_trustzone.h"
21 #include "spm_func.h"
22 #include "region_defs.h"
23 #include "region.h"
24 #include "spm_partition_defs.h"
25 #include "psa_manifest/pid.h"
26 #include "tfm/tfm_spm_services.h"
27 #include "tfm_spm_db_func.inc"
28 
29 #define EXC_RETURN_SECURE_FUNCTION 0xFFFFFFFD
30 #define EXC_RETURN_SECURE_HANDLER 0xFFFFFFF1
31 
32 #ifndef TFM_LVL
33 #error TFM_LVL is not defined!
34 #endif
35 
36 #ifdef TFM_MULTI_CORE_TOPOLOGY
37 #error Multi core is not supported by Function mode
38 #endif
39 
40 REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t);
41 REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Limit, struct iovec_args_t)[];
42 
43 static uint32_t *tfm_secure_stack_seal =
44  ((uint32_t *)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1]) - 2;
45 
46 REGION_DECLARE_T(Image$$, ARM_LIB_STACK_SEAL, $$ZI$$Base, uint32_t);
47 
48 /*
49  * Function to seal the PSP stacks for the Function model of TF-M.
50  */
51 void tfm_spm_seal_psp_stacks(void)
52 {
53  /*
54  * The top of TFM_SECURE_STACK is used for the iovec parameters, so the
55  * seal has to be placed between the iovec parameters and the partition stack.
56  *
57  * Image$$TFM_SECURE_STACK$$ZI$$Limit->  +-------------------------+
58  *                                       |                         |
59  *                                       |  iovec parameters for   |
60  *                                       |  partition              |
61  * (Image$$TFM_SECURE_STACK$$ZI$$Limit - |                         |
62  *  sizeof(iovec_args_t))            ->  +-------------------------+
63  *                                       |       Stack Seal        |
64  *                                       +-------------------------+
65  *                                       |                         |
66  *                                       |     Partition stack     |
67  *                                       |                         |
68  * Image$$TFM_SECURE_STACK$$ZI$$Base->   +-------------------------+
69  */
70  *(tfm_secure_stack_seal) = TFM_STACK_SEAL_VALUE;
71  *(tfm_secure_stack_seal + 1) = TFM_STACK_SEAL_VALUE;
72 
73  /*
74  * Seal the ARM_LIB_STACK by writing the seal value to the reserved
75  * region.
76  */
77  uint32_t *arm_lib_stck_seal_base = (uint32_t *)&REGION_NAME(Image$$,
78  ARM_LIB_STACK_SEAL, $$ZI$$Base);
79 
80  *(arm_lib_stck_seal_base) = TFM_STACK_SEAL_VALUE;
81  *(arm_lib_stck_seal_base + 1) = TFM_STACK_SEAL_VALUE;
82 }
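The two seal words written above implement Arm's secure stack sealing recommendation: code that later inspects the stack can verify that the words just below the iovec area (and at the base of the ARM_LIB_STACK seal region) still hold the seal pattern, and treat anything else as overflow or tampering. A minimal sketch of such a check, assuming TFM_STACK_SEAL_VALUE is the usual 0xFEF5EDA5 seal pattern (the real macro is defined elsewhere in TF-M) and using an invented function name:

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_STACK_SEAL_VALUE 0xFEF5EDA5U /* assumed value of TFM_STACK_SEAL_VALUE */

/* Return true while both 32-bit seal words still hold the seal pattern. */
static bool example_stack_seal_intact(const uint32_t *seal)
{
    return (seal[0] == EXAMPLE_STACK_SEAL_VALUE) &&
           (seal[1] == EXAMPLE_STACK_SEAL_VALUE);
}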
83 
84 /*
85  * This is the "Big Lock" on the secure side, to guarantee single entry
86  * to SPE
87  */
88 static int32_t tfm_secure_lock;
89 static int32_t tfm_secure_api_initializing = 1;
90 
91 static uint32_t *prepare_partition_iovec_ctx(
92  const struct tfm_state_context_t *svc_ctx,
93  const struct tfm_sfn_req_s *desc_ptr,
94  const struct iovec_args_t *iovec_args,
95  uint32_t *dst)
96 {
97  /* XPSR = as was when called, but make sure it's thread mode */
98  *(--dst) = svc_ctx->xpsr & 0xFFFFFE00U;
99  /* ReturnAddress = resume veneer in new context */
100  *(--dst) = svc_ctx->ra;
101  /* LR = sfn address */
102  *(--dst) = (uint32_t)desc_ptr->sfn;
103  /* R12 = don't care */
104  *(--dst) = 0U;
105 
106  /* R0-R3 = sfn arguments */
107  *(--dst) = iovec_args->out_len;
108  *(--dst) = (uint32_t)iovec_args->out_vec;
109  *(--dst) = iovec_args->in_len;
110  *(--dst) = (uint32_t)iovec_args->in_vec;
111 
112  return dst;
113 }
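The eight words written by prepare_partition_iovec_ctx() form a basic Armv8-M exception return frame; because they are pushed from the top down, the finished frame at the returned pointer reads, lowest address first, r0-r3, r12, lr, return address, xPSR. An illustrative view of that layout (the struct and field names are for the example only; TF-M's own view of the frame is struct tfm_state_context_t):

#include <stdint.h>

struct example_basic_exc_frame {
    uint32_t r0;   /* in_vec  - first sfn argument                       */
    uint32_t r1;   /* in_len  - second sfn argument                      */
    uint32_t r2;   /* out_vec - third sfn argument                       */
    uint32_t r3;   /* out_len - fourth sfn argument                      */
    uint32_t r12;  /* written as 0, don't care                           */
    uint32_t lr;   /* loaded with the sfn address                        */
    uint32_t ra;   /* return address: the resume point in the veneer     */
    uint32_t xpsr; /* caller xPSR with the exception number bits cleared */
};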
114 
127 static int32_t *prepare_partition_irq_ctx(
128  const struct tfm_state_context_t *svc_ctx,
129  sfn_t unpriv_handler,
130  int32_t *dst)
131 {
132  int i;
133 
134  /* XPSR = as was when called, but make sure it's thread mode */
135  *(--dst) = svc_ctx->xpsr & 0xFFFFFE00;
136  /* ReturnAddress = resume to the privileged handler code, but execute it
137  * unprivileged.
138  */
139  *(--dst) = svc_ctx->ra;
140  /* LR = start address */
141  *(--dst) = (int32_t)unpriv_handler;
142 
143  /* R12, R0-R3 unused arguments */
144  for (i = 0; i < 5; ++i) {
145  *(--dst) = 0;
146  }
147 
148  return dst;
149 }
150 
151 static void restore_caller_ctx(const struct tfm_state_context_t *svc_ctx,
152  struct tfm_state_context_t *target_ctx)
153 {
154  /* ReturnAddress = resume veneer after second SVC */
155  target_ctx->ra = svc_ctx->ra;
156 
157  /* R0 = function return value */
158  target_ctx->r0 = svc_ctx->r0;
159 
160  return;
161 }
162 
172 static enum tfm_status_e tfm_core_check_sfn_parameters(
173  const struct tfm_sfn_req_s *desc_ptr)
174 {
175  struct psa_invec *in_vec = (psa_invec *)desc_ptr->args[0];
176  size_t in_len;
177  struct psa_outvec *out_vec = (psa_outvec *)desc_ptr->args[2];
178  size_t out_len;
179  uint32_t i;
180  uint32_t privileged_mode = TFM_PARTITION_UNPRIVILEGED_MODE;
181 
182  if ((desc_ptr->args[1] < 0) || (desc_ptr->args[3] < 0)) {
183  return TFM_ERROR_INVALID_PARAMETER;
184  }
185 
186  in_len = (size_t)(desc_ptr->args[1]);
187  out_len = (size_t)(desc_ptr->args[3]);
188 
189  /*
190  * Get caller's privileged mode:
191  * The privileged mode of NS Secure Service caller will be decided by the
192  * tfm_core_has_xxx_access_to_region functions.
193  * Secure caller can be only privileged mode because the whole SPE is
194  * running under privileged mode
195  */
196  if (!desc_ptr->ns_caller) {
197  privileged_mode = TFM_PARTITION_PRIVILEGED_MODE;
198  }
199 
200  /* Check that the number of vectors is within range. Extra checks to avoid overflow */
201  if ((in_len > PSA_MAX_IOVEC) || (out_len > PSA_MAX_IOVEC) ||
202  (in_len + out_len > PSA_MAX_IOVEC)) {
203  return TFM_ERROR_INVALID_PARAMETER;
204  }
205 
206  /* Check whether the caller partition has write access to the iovec
207  * structures themselves. Use the TT instruction for this.
208  */
209  if (in_len > 0) {
210  if ((in_vec == NULL) ||
211  (tfm_core_has_write_access_to_region(in_vec,
212  sizeof(psa_invec)*in_len, desc_ptr->ns_caller,
213  privileged_mode) != TFM_SUCCESS)) {
214  return TFM_ERROR_INVALID_PARAMETER;
215  }
216  } else {
217  if (in_vec != NULL) {
218  return TFM_ERROR_INVALID_PARAMETER;
219  }
220  }
221  if (out_len > 0) {
222  if ((out_vec == NULL) ||
223  (tfm_core_has_write_access_to_region(out_vec,
224  sizeof(psa_outvec)*out_len, desc_ptr->ns_caller,
225  privileged_mode) != TFM_SUCCESS)) {
226  return TFM_ERROR_INVALID_PARAMETER;
227  }
228  } else {
229  if (out_vec != NULL) {
230  return TFM_ERROR_INVALID_PARAMETER;
231  }
232  }
233 
234  /* Check whether the caller partition has access to the data inside the
235  * iovecs
236  */
237  for (i = 0; i < in_len; ++i) {
238  if (in_vec[i].len > 0) {
239  if ((in_vec[i].base == NULL) ||
240  (tfm_core_has_read_access_to_region(in_vec[i].base,
241  in_vec[i].len, desc_ptr->ns_caller,
242  privileged_mode) != TFM_SUCCESS)) {
243  return TFM_ERROR_INVALID_PARAMETER;
244  }
245  }
246  }
247  for (i = 0; i < out_len; ++i) {
248  if (out_vec[i].len > 0) {
249  if ((out_vec[i].base == NULL) ||
250  (tfm_core_has_write_access_to_region(out_vec[i].base,
251  out_vec[i].len, desc_ptr->ns_caller,
252  privileged_mode) != TFM_SUCCESS)) {
253  return TFM_ERROR_INVALID_PARAMETER;
254  }
255  }
256  }
257 
258  return TFM_SUCCESS;
259 }
260 
261 static void tfm_copy_iovec_parameters(struct iovec_args_t *target,
262  const struct iovec_args_t *source)
263 {
264  size_t i;
265 
266  /* The vectors have already been sanity checked, and interrupts have been
267  * kept disabled since then. So the vectors cannot have been tampered
268  * with since the check, and it is safe to pass
269  * them to the called partition.
270  */
271 
272  target->in_len = source->in_len;
273  for (i = 0; i < source->in_len; ++i) {
274  target->in_vec[i].base = source->in_vec[i].base;
275  target->in_vec[i].len = source->in_vec[i].len;
276  }
277  target->out_len = source->out_len;
278  for (i = 0; i < source->out_len; ++i) {
279  target->out_vec[i].base = source->out_vec[i].base;
280  target->out_vec[i].len = source->out_vec[i].len;
281  }
282 }
283 
284 static void tfm_clear_iovec_parameters(struct iovec_args_t *args)
285 {
286  int i;
287 
288  args->in_len = 0;
289  for (i = 0; i < PSA_MAX_IOVEC; ++i) {
290  args->in_vec[i].base = NULL;
291  args->in_vec[i].len = 0;
292  }
293  args->out_len = 0;
294  for (i = 0; i < PSA_MAX_IOVEC; ++i) {
295  args->out_vec[i].base = NULL;
296  args->out_vec[i].len = 0;
297  }
298 }
299 
309 static enum tfm_status_e check_partition_state(uint32_t curr_partition_state,
310  uint32_t caller_partition_state)
311 {
312  if (caller_partition_state != SPM_PARTITION_STATE_RUNNING) {
313  /* Calling partition from non-running state (e.g. during handling IRQ)
314  * is not allowed.
315  */
316  return TFM_ERROR_INVALID_EXC_MODE;
317  }
318 
319  if (curr_partition_state == SPM_PARTITION_STATE_RUNNING ||
320  curr_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
321  curr_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
322  curr_partition_state == SPM_PARTITION_STATE_BLOCKED) {
323  /* Active partitions cannot be called! */
324  return TFM_ERROR_PARTITION_NON_REENTRANT;
325  } else if (curr_partition_state != SPM_PARTITION_STATE_IDLE) {
326  /* The partition to be called is not in a proper state */
327  return TFM_SECURE_LOCK_FAILED;
328  }
329  return TFM_SUCCESS;
330 }
331 
340 static enum tfm_status_e check_irq_partition_state(
341  uint32_t called_partition_state)
342 {
343  if (called_partition_state == SPM_PARTITION_STATE_IDLE ||
344  called_partition_state == SPM_PARTITION_STATE_RUNNING ||
345  called_partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
346  called_partition_state == SPM_PARTITION_STATE_SUSPENDED ||
347  called_partition_state == SPM_PARTITION_STATE_BLOCKED) {
348  return TFM_SUCCESS;
349  }
350  return TFM_SECURE_LOCK_FAILED;
351 }
352 
361 static struct iovec_args_t *get_iovec_args_stack_address(uint32_t partition_idx)
362 {
363  /* Save the iovecs on the common stack. */
364  return &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
365 }
366 
375 static uint32_t get_partition_idx(uint32_t partition_id)
376 {
377  uint32_t i;
378 
379  if (partition_id == INVALID_PARTITION_ID) {
380  return SPM_INVALID_PARTITION_IDX;
381  }
382 
383  for (i = 0; i < g_spm_partition_db.partition_count; ++i) {
384  if (g_spm_partition_db.partitions[i].static_data->partition_id ==
385  partition_id) {
386  return i;
387  }
388  }
389  return SPM_INVALID_PARTITION_IDX;
390 }
391 
401 static uint32_t tfm_spm_partition_get_flags(uint32_t partition_idx)
402 {
403  return g_spm_partition_db.partitions[partition_idx].static_data->
404  partition_flags;
405 }
406 
407 static enum tfm_status_e tfm_start_partition(
408  const struct tfm_sfn_req_s *desc_ptr,
409  uint32_t excReturn)
410 {
411  enum tfm_status_e res;
412  uint32_t caller_partition_idx = desc_ptr->caller_part_idx;
413  const struct spm_partition_runtime_data_t *curr_part_data;
414  const struct spm_partition_runtime_data_t *caller_part_data;
415  uint32_t caller_flags;
416  register uint32_t partition_idx;
417  uint32_t psp;
418  uint32_t partition_psp, partition_psplim;
419  uint32_t partition_state;
420  uint32_t caller_partition_state;
421  struct tfm_state_context_t *svc_ctx;
422  uint32_t caller_partition_id;
423  int32_t client_id;
424  struct iovec_args_t *iovec_args;
425 
426  psp = __get_PSP();
427  svc_ctx = (struct tfm_state_context_t *)psp;
428  caller_flags = tfm_spm_partition_get_flags(caller_partition_idx);
429 
430  /* Check partition state consistency */
431  if (((caller_flags & SPM_PART_FLAG_APP_ROT) != 0)
432  != (!desc_ptr->ns_caller)) {
433  /* Partition state inconsistency detected */
434  return TFM_SECURE_LOCK_FAILED;
435  }
436 
437  partition_idx = get_partition_idx(desc_ptr->sp_id);
438 
439  curr_part_data = tfm_spm_partition_get_runtime_data(partition_idx);
440  caller_part_data = tfm_spm_partition_get_runtime_data(caller_partition_idx);
441  partition_state = curr_part_data->partition_state;
442  caller_partition_state = caller_part_data->partition_state;
443  caller_partition_id = tfm_spm_partition_get_partition_id(
444  caller_partition_idx);
445 
446  if (!tfm_secure_api_initializing) {
447  res = check_partition_state(partition_state, caller_partition_state);
448  if (res != TFM_SUCCESS) {
449  return res;
450  }
451  }
452 
453  /* Prepare switch to shared secure partition stack */
454  /* In case the call is coming from the non-secure world, the iovecs are
455  * saved on the top of the stack, and the stack seal sits below this region.
456  * So the memory area that can actually be used as stack by the partitions
457  * starts at a lower address.
458  */
459  partition_psp = (uint32_t) tfm_secure_stack_seal;
460  partition_psplim =
461  (uint32_t)&REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Base);
462 
463  /* Store the context for the partition call */
464  tfm_spm_partition_set_caller_partition_idx(partition_idx,
465  caller_partition_idx);
466  tfm_spm_partition_store_context(caller_partition_idx, psp, excReturn);
467 
468  if ((caller_flags & SPM_PART_FLAG_APP_ROT)) {
469  tfm_spm_partition_set_caller_client_id(partition_idx,
470  caller_partition_id);
471  } else {
472  client_id = tfm_nspm_get_current_client_id();
473  if (client_id >= 0) {
474  return TFM_SECURE_LOCK_FAILED;
475  }
476  tfm_spm_partition_set_caller_client_id(partition_idx, client_id);
477  }
478 
479  /* In level one, only switch context and return from exception if in
480  * handler mode
481  */
482  if ((desc_ptr->ns_caller) || (tfm_secure_api_initializing)) {
483  if (tfm_spm_partition_set_iovec(partition_idx, desc_ptr->args) !=
484  SPM_ERR_OK) {
485  return TFM_ERROR_GENERIC;
486  }
487  iovec_args = get_iovec_args_stack_address(partition_idx);
488  tfm_copy_iovec_parameters(iovec_args, &(curr_part_data->iovec_args));
489 
490  /* Prepare the partition context, update stack ptr */
491  psp = (uint32_t)prepare_partition_iovec_ctx(svc_ctx, desc_ptr,
492  iovec_args,
493  (uint32_t *)partition_psp);
494  __set_PSP(psp);
495  tfm_arch_set_psplim(partition_psplim);
496  }
497 
498  tfm_spm_partition_set_state(caller_partition_idx,
499  SPM_PARTITION_STATE_BLOCKED);
500  tfm_spm_partition_set_state(partition_idx, SPM_PARTITION_STATE_RUNNING);
501  tfm_secure_lock++;
502 
503  return TFM_SUCCESS;
504 }
505 
506 static enum tfm_status_e tfm_start_partition_for_irq_handling(
507  uint32_t excReturn,
508  struct tfm_state_context_t *svc_ctx)
509 {
510  uint32_t handler_partition_id = svc_ctx->r0;
511  sfn_t unpriv_handler = (sfn_t)svc_ctx->r1;
512  uint32_t irq_signal = svc_ctx->r2;
513  IRQn_Type irq_line = (IRQn_Type) svc_ctx->r3;
514  enum tfm_status_e res;
515  uint32_t psp = __get_PSP();
516  uint32_t handler_partition_psp;
517  uint32_t handler_partition_state;
518  uint32_t interrupted_partition_idx =
519  tfm_spm_partition_get_running_partition_idx();
520  const struct spm_partition_runtime_data_t *handler_part_data;
521  uint32_t handler_partition_idx;
522 
523  handler_partition_idx = get_partition_idx(handler_partition_id);
524  handler_part_data = tfm_spm_partition_get_runtime_data(
525  handler_partition_idx);
526  handler_partition_state = handler_part_data->partition_state;
527 
528  res = check_irq_partition_state(handler_partition_state);
529  if (res != TFM_SUCCESS) {
530  return res;
531  }
532 
533  /* set mask for the partition */
534  tfm_spm_partition_set_signal_mask(
535  handler_partition_idx,
536  handler_part_data->signal_mask | irq_signal);
537 
538  tfm_spm_hal_disable_irq(irq_line);
539 
540  /* save the current context of the interrupted partition */
541  tfm_spm_partition_push_interrupted_ctx(interrupted_partition_idx);
542 
543  handler_partition_psp = psp;
544 
545  /* save the current context of the handler partition */
546  tfm_spm_partition_push_handler_ctx(handler_partition_idx);
547 
548  /* Store caller for the partition */
549  tfm_spm_partition_set_caller_partition_idx(handler_partition_idx,
550  interrupted_partition_idx);
551 
552  psp = (uint32_t)prepare_partition_irq_ctx(svc_ctx, unpriv_handler,
553  (int32_t *)handler_partition_psp);
554  __set_PSP(psp);
555 
556  tfm_spm_partition_set_state(interrupted_partition_idx,
557  SPM_PARTITION_STATE_SUSPENDED);
558  tfm_spm_partition_set_state(handler_partition_idx,
559  SPM_PARTITION_STATE_HANDLING_IRQ);
560 
561  return TFM_SUCCESS;
562 }
563 
564 static enum tfm_status_e tfm_return_from_partition(uint32_t *excReturn)
565 {
566  uint32_t current_partition_idx =
567  tfm_spm_partition_get_running_partition_idx();
568  const struct spm_partition_runtime_data_t *curr_part_data, *ret_part_data;
569  uint32_t return_partition_idx;
570  uint32_t return_partition_flags;
571  uint32_t psp = __get_PSP();
572  size_t i;
573  struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
574  struct iovec_args_t *iovec_args;
575 
576  if (current_partition_idx == SPM_INVALID_PARTITION_IDX) {
577  return TFM_SECURE_UNLOCK_FAILED;
578  }
579 
580  curr_part_data = tfm_spm_partition_get_runtime_data(current_partition_idx);
581  return_partition_idx = curr_part_data->caller_partition_idx;
582 
583  if (return_partition_idx == SPM_INVALID_PARTITION_IDX) {
584  return TFM_SECURE_UNLOCK_FAILED;
585  }
586 
587  ret_part_data = tfm_spm_partition_get_runtime_data(return_partition_idx);
588 
589  return_partition_flags = tfm_spm_partition_get_flags(return_partition_idx);
590 
591  tfm_secure_lock--;
592 
593  if (!(return_partition_flags & SPM_PART_FLAG_APP_ROT) ||
594  (tfm_secure_api_initializing)) {
595  /* In TFM level 1 context restore is only done when
596  * returning to NS or after initialization
597  */
598  /* Restore caller context */
599  restore_caller_ctx(svc_ctx,
600  (struct tfm_state_context_t *)ret_part_data->stack_ptr);
601  *excReturn = ret_part_data->lr;
602  __set_PSP(ret_part_data->stack_ptr);
603  REGION_DECLARE_T(Image$$, ARM_LIB_STACK, $$ZI$$Base, uint32_t)[];
604  uint32_t psp_stack_bottom =
605  (uint32_t)REGION_NAME(Image$$, ARM_LIB_STACK, $$ZI$$Base);
606  tfm_arch_set_psplim(psp_stack_bottom);
607 
608  iovec_args = &REGION_NAME(Image$$, TFM_SECURE_STACK, $$ZI$$Limit)[-1];
609 
610  for (i = 0; i < curr_part_data->iovec_args.out_len; ++i) {
611  curr_part_data->orig_outvec[i].len = iovec_args->out_vec[i].len;
612  }
613  tfm_clear_iovec_parameters(iovec_args);
614  }
615 
616  tfm_spm_partition_cleanup_context(current_partition_idx);
617 
618  tfm_spm_partition_set_state(current_partition_idx,
619  SPM_PARTITION_STATE_IDLE);
620  tfm_spm_partition_set_state(return_partition_idx,
621  SPM_PARTITION_STATE_RUNNING);
622 
623  return TFM_SUCCESS;
624 }
625 
626 static enum tfm_status_e tfm_return_from_partition_irq_handling(
627  uint32_t *excReturn)
628 {
629  uint32_t handler_partition_idx =
630  tfm_spm_partition_get_running_partition_idx();
631  const struct spm_partition_runtime_data_t *handler_part_data;
632  uint32_t interrupted_partition_idx;
633  uint32_t psp = __get_PSP();
634  struct tfm_state_context_t *svc_ctx = (struct tfm_state_context_t *)psp;
635 
636  if (handler_partition_idx == SPM_INVALID_PARTITION_IDX) {
637  return TFM_SECURE_UNLOCK_FAILED;
638  }
639 
640  handler_part_data = tfm_spm_partition_get_runtime_data(
641  handler_partition_idx);
642  interrupted_partition_idx = handler_part_data->caller_partition_idx;
643 
644  if (interrupted_partition_idx == SPM_INVALID_PARTITION_IDX) {
645  return TFM_SECURE_UNLOCK_FAILED;
646  }
647 
648  /* For level 1, modify PSP, so that the SVC stack frame disappears,
649  * and return to the privileged handler using the stack frame still on the
650  * MSP stack.
651  */
652  *excReturn = svc_ctx->ra;
653  psp += sizeof(struct tfm_state_context_t);
654 
655  tfm_spm_partition_pop_handler_ctx(handler_partition_idx);
656  tfm_spm_partition_pop_interrupted_ctx(interrupted_partition_idx);
657 
658  __set_PSP(psp);
659 
660  return TFM_SUCCESS;
661 }
662 
663 static enum tfm_status_e tfm_check_sfn_req_integrity(
664  const struct tfm_sfn_req_s *desc_ptr)
665 {
666  if ((desc_ptr == NULL) ||
667  (desc_ptr->sp_id == 0) ||
668  (desc_ptr->sfn == NULL)) {
669  /* invalid parameter */
670  return TFM_ERROR_INVALID_PARAMETER;
671  }
672  return TFM_SUCCESS;
673 }
674 
675 static enum tfm_status_e tfm_core_check_sfn_req_rules(
676  const struct tfm_sfn_req_s *desc_ptr)
677 {
678  /* Check partition idx validity */
679  if (desc_ptr->caller_part_idx == SPM_INVALID_PARTITION_IDX) {
680  return TFM_ERROR_NO_ACTIVE_PARTITION;
681  }
682 
683  if ((desc_ptr->ns_caller) && (tfm_secure_lock != 0)) {
684  /* Secure domain is already locked!
685  * This should only happen if caller is secure partition!
686  */
687  /* This scenario is a potential security breach.
688  * Error is handled in caller.
689  */
690  return TFM_ERROR_SECURE_DOMAIN_LOCKED;
691  }
692 
693  if (tfm_secure_api_initializing) {
694  int32_t id =
695  tfm_spm_partition_get_partition_id(desc_ptr->caller_part_idx);
696 
697  if ((id != TFM_SP_CORE_ID) || (tfm_secure_lock != 0)) {
698  /* Invalid request during system initialization */
699  ERROR_MSG("Invalid service request during initialization!");
700  return TFM_ERROR_NOT_INITIALIZED;
701  }
702  }
703 
704  return TFM_SUCCESS;
705 }
706 
707 uint32_t tfm_spm_partition_get_partition_id(uint32_t partition_idx)
708 {
709  return g_spm_partition_db.partitions[partition_idx].static_data->
710  partition_id;
711 }
712 
713 uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
714 {
715  if (partition_flags & SPM_PART_FLAG_PSA_ROT) {
716  return TFM_PARTITION_PRIVILEGED_MODE;
717  } else {
718  return TFM_PARTITION_UNPRIVILEGED_MODE;
719  }
720 }
721 
722 bool tfm_is_partition_privileged(uint32_t partition_idx)
723 {
724  uint32_t flags = tfm_spm_partition_get_flags(partition_idx);
725 
726  return tfm_spm_partition_get_privileged_mode(flags) ==
727  TFM_PARTITION_PRIVILEGED_MODE;
728 }
729 
730 void tfm_spm_secure_api_init_done(void)
731 {
732  tfm_secure_api_initializing = 0;
733 }
734 
735 enum tfm_status_e tfm_spm_sfn_request_handler(
736  struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
737 {
738  enum tfm_status_e res;
739 
740  res = tfm_check_sfn_req_integrity(desc_ptr);
741  if (res != TFM_SUCCESS) {
742  ERROR_MSG("Invalid service request!");
743  tfm_secure_api_error_handler();
744  }
745 
746  __disable_irq();
747 
748  desc_ptr->caller_part_idx = tfm_spm_partition_get_running_partition_idx();
749 
750  res = tfm_core_check_sfn_parameters(desc_ptr);
751  if (res != TFM_SUCCESS) {
752  /* The sanity check of iovecs failed. */
753  __enable_irq();
754  tfm_secure_api_error_handler();
755  }
756 
757  res = tfm_core_check_sfn_req_rules(desc_ptr);
758  if (res != TFM_SUCCESS) {
759  /* FixMe: error compartmentalization TBD */
760  tfm_spm_partition_set_state(desc_ptr->caller_part_idx,
761  SPM_PARTITION_STATE_CLOSED);
762  __enable_irq();
763  ERROR_MSG("Unauthorized service request!");
764  tfm_secure_api_error_handler();
765  }
766 
767  res = tfm_start_partition(desc_ptr, excReturn);
768  if (res != TFM_SUCCESS) {
769  /* FixMe: consider possible fault scenarios */
770  __enable_irq();
771  ERROR_MSG("Failed to process service request!");
772  tfm_secure_api_error_handler();
773  }
774 
775  __enable_irq();
776 
777  return res;
778 }
779 
780 int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
781 {
782  enum tfm_status_e res;
783  int32_t *args;
784  int32_t retVal;
785 
786  res = tfm_core_check_sfn_parameters(desc_ptr);
787  if (res != TFM_SUCCESS) {
788  /* The sanity check of iovecs failed. */
789  return (int32_t)res;
790  }
791 
792  /* No excReturn value is needed as no exception handling is used */
793  res = tfm_spm_sfn_request_handler(desc_ptr, 0);
794 
795  if (res != TFM_SUCCESS) {
796  return (int32_t)res;
797  }
798 
799  /* Secure partition to secure partition call in TFM level 1 */
800  args = desc_ptr->args;
801  retVal = desc_ptr->sfn(args[0], args[1], args[2], args[3]);
802 
803  /* return handler should restore original exc_return value... */
804  res = tfm_return_from_partition(NULL);
805  if (res == TFM_SUCCESS) {
806  /* If unlock successful, pass SS return value to caller */
807  return retVal;
808  } else {
809  /* Unlock errors indicate ctx database corruption or unknown
810  * anomalies. Halt execution
811  */
812  ERROR_MSG("Secure API error during unlock!");
813  tfm_secure_api_error_handler();
814  }
815  return (int32_t)res;
816 }
817 
818 int32_t tfm_spm_check_buffer_access(uint32_t partition_idx,
819  void *start_addr,
820  size_t len,
821  uint32_t alignment)
822 {
823  uintptr_t start_addr_value = (uintptr_t)start_addr;
824  uintptr_t end_addr_value = (uintptr_t)start_addr + len;
825  uintptr_t alignment_mask;
826 
827  alignment_mask = (((uintptr_t)1) << alignment) - 1;
828 
829  /* Check that the pointer is aligned properly */
830  if (start_addr_value & alignment_mask) {
831  /* not aligned, return error */
832  return 0;
833  }
834 
835  /* Protect against overflow (and zero len) */
836  if (end_addr_value <= start_addr_value) {
837  return 0;
838  }
839 
840  /* For privileged partition execution, all secure data memory and stack
841  * is accessible
842  */
843  if (start_addr_value >= S_DATA_START &&
844  end_addr_value <= (S_DATA_START + S_DATA_SIZE)) {
845  return 1;
846  }
847 
848  return 0;
849 }
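As the code above shows, the alignment parameter is the base-2 logarithm of the required alignment, so a value of 2 demands 4-byte alignment, and the whole [start_addr, start_addr + len) range must fall inside secure data memory. A small usage sketch with an invented wrapper name (tfm_spm_check_buffer_access() itself is the real function declared in spm_func.h):

/* Return non-zero when 'p' is a 4-byte aligned pointer to a word inside
 * secure data memory, which is how the SVC handlers below use the check.
 */
static int32_t example_validate_word_ptr(uint32_t partition_idx, int32_t *p)
{
    return tfm_spm_check_buffer_access(partition_idx, p, sizeof(*p), 2);
}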
850 
851 void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
852 {
853  uintptr_t result_ptr_value = svc_args[0];
854  uint32_t running_partition_idx =
855  tfm_spm_partition_get_running_partition_idx();
856  const uint32_t running_partition_flags =
857  tfm_spm_partition_get_flags(running_partition_idx);
858  const struct spm_partition_runtime_data_t *curr_part_data =
859  tfm_spm_partition_get_runtime_data(running_partition_idx);
860  int res = 0;
861 
862  if (!(running_partition_flags & SPM_PART_FLAG_APP_ROT) ||
863  curr_part_data->partition_state == SPM_PARTITION_STATE_HANDLING_IRQ ||
864  curr_part_data->partition_state == SPM_PARTITION_STATE_SUSPENDED) {
865  /* This handler shouldn't be called from outside partition context.
866  * Also if the current partition is handling IRQ, the caller partition
867  * index might not be valid;
868  * Partitions are only allowed to run while S domain is locked.
869  */
870  svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
871  return;
872  }
873 
874  /* Make sure that the output pointer points to a memory area that is owned
875  * by the partition
876  */
877  res = tfm_spm_check_buffer_access(running_partition_idx,
878  (void *)result_ptr_value,
879  sizeof(curr_part_data->caller_client_id),
880  2);
881  if (!res) {
882  /* Not in accessible range, return error */
883  svc_args[0] = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
884  return;
885  }
886 
887  *((int32_t *)result_ptr_value) = curr_part_data->caller_client_id;
888 
889  /* Store return value in r0 */
890  svc_args[0] = (uint32_t)TFM_SUCCESS;
891 }
892 
893 /* This SVC handler is called if veneer is running in thread mode */
894 uint32_t tfm_spm_partition_request_svc_handler(
895  const uint32_t *svc_ctx, uint32_t excReturn)
896 {
897  struct tfm_sfn_req_s *desc_ptr;
898 
899  if (!(excReturn & EXC_RETURN_STACK_PROCESS)) {
900  /* Service request SVC called with MSP active.
901  * Either invalid configuration for Thread mode or SVC called
902  * from Handler mode, which is not supported.
903  * FixMe: error severity TBD
904  */
905  ERROR_MSG("Service request SVC called with MSP active!");
906  tfm_secure_api_error_handler();
907  }
908 
909  desc_ptr = (struct tfm_sfn_req_s *)svc_ctx[0];
910 
911  if (tfm_spm_sfn_request_handler(desc_ptr, excReturn) != TFM_SUCCESS) {
912  tfm_secure_api_error_handler();
913  }
914 
915  return EXC_RETURN_SECURE_FUNCTION;
916 }
917 
918 /* This SVC handler is called, if a thread mode execution environment is to
919  * be set up, to run an unprivileged IRQ handler
920  */
921 uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
922 {
923  struct tfm_state_context_t *svc_ctx =
924  (struct tfm_state_context_t *)svc_args;
925 
926  enum tfm_status_e res;
927 
928  if (excReturn & EXC_RETURN_STACK_PROCESS) {
929  /* FixMe: error severity TBD */
930  ERROR_MSG("Partition request SVC called with PSP active!");
931  tfm_secure_api_error_handler();
932  }
933 
934  res = tfm_start_partition_for_irq_handling(excReturn, svc_ctx);
935  if (res != TFM_SUCCESS) {
936  /* The partition is in an invalid state (UNINIT or CLOSED), so none of
937  * its code can be run
938  */
939  /* FixMe: For now this case is handled with TF-M panic, however it would
940  * be possible to skip the execution of the interrupt handler, and
941  * resume the execution of the interrupted code.
942  */
943  tfm_secure_api_error_handler();
944  }
945  return EXC_RETURN_SECURE_FUNCTION;
946 }
947 
948 /* This SVC handler is called when sfn returns */
949 uint32_t tfm_spm_partition_return_handler(uint32_t lr)
950 {
951  enum tfm_status_e res;
952 
953  if (!(lr & EXC_RETURN_STACK_PROCESS)) {
954  /* Partition return SVC called with MSP active.
955  * This should not happen!
956  */
957  ERROR_MSG("Partition return SVC called with MSP active!");
958  tfm_secure_api_error_handler();
959  }
960 
961  res = tfm_return_from_partition(&lr);
962  if (res != TFM_SUCCESS) {
963  /* Unlock errors indicate ctx database corruption or unknown anomalies
964  * Halt execution
965  */
966  ERROR_MSG("Secure API error during unlock!");
967  tfm_secure_api_error_handler();
968  }
969 
970  return lr;
971 }
972 
973 /* This SVC handler is called if a deprivileged IRQ handler was executed, and
974  * the execution environment is to be set back for the privileged handler mode
975  */
976 uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
977 {
978  enum tfm_status_e res;
979  struct tfm_state_context_t *irq_svc_ctx;
980 
981  /* Take into account the sealed stack*/
982  irq_svc_args += 2;
983 
984  irq_svc_ctx = (struct tfm_state_context_t *)irq_svc_args;
985 
986  if (!(lr & EXC_RETURN_STACK_PROCESS)) {
987  /* Partition request SVC called with MSP active.
988  * FixMe: error severity TBD
989  */
990  ERROR_MSG("Partition request SVC called with MSP active!");
991  tfm_secure_api_error_handler();
992  }
993 
994  res = tfm_return_from_partition_irq_handling(&lr);
995  if (res != TFM_SUCCESS) {
996  /* Unlock errors indicate ctx database corruption or unknown anomalies
997  * Halt execution
998  */
999  ERROR_MSG("Secure API error during unlock!");
1000  tfm_secure_api_error_handler();
1001  }
1002 
1003  irq_svc_ctx->ra = lr;
1004 
1005  return EXC_RETURN_SECURE_HANDLER;
1006 }
1007 
1008 /* FIXME: get_irq_line_for_signal is also implemented in the ipc folder. */
1019 static IRQn_Type get_irq_line_for_signal(int32_t partition_id,
1020  psa_signal_t signal)
1021 {
1022  size_t i;
1023 
1024  for (i = 0; i < tfm_core_irq_signals_count; ++i) {
1025  if (tfm_core_irq_signals[i].partition_id == partition_id &&
1026  tfm_core_irq_signals[i].signal_value == signal) {
1027  return tfm_core_irq_signals[i].irq_line;
1028  }
1029  }
1030  return (IRQn_Type) -1;
1031 }
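Note that the (IRQn_Type)-1 return value doubles as the "no mapping found" marker, which is why the handlers below compare the looked-up line against zero before touching the interrupt controller. A minimal sketch of that consumption pattern, with an invented wrapper name:

static void example_enable_signal_irq(int32_t partition_id, psa_signal_t signal)
{
    IRQn_Type irq_line = get_irq_line_for_signal(partition_id, signal);

    if (irq_line < 0) {
        return; /* no entry in tfm_core_irq_signals[] for this pair */
    }
    tfm_spm_hal_enable_irq(irq_line);
}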
1032 
1033 void tfm_spm_enable_irq_handler(uint32_t *svc_args)
1034 {
1035  struct tfm_state_context_t *svc_ctx =
1036  (struct tfm_state_context_t *)svc_args;
1037  psa_signal_t irq_signal = svc_ctx->r0;
1038  uint32_t running_partition_idx =
1039  tfm_spm_partition_get_running_partition_idx();
1040  uint32_t running_partition_id =
1041  tfm_spm_partition_get_partition_id(running_partition_idx);
1042  IRQn_Type irq_line;
1043 
1044  /* Only a single signal is allowed */
1045  if (!tfm_is_one_bit_set(irq_signal)) {
1046  /* FixMe: error severity TBD */
1047  tfm_secure_api_error_handler();
1048  }
1049 
1050  irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
1051 
1052  if (irq_line < 0) {
1053  /* FixMe: error severity TBD */
1054  tfm_secure_api_error_handler();
1055  }
1056 
1057  tfm_spm_hal_enable_irq(irq_line);
1058 }
1059 
1060 void tfm_spm_disable_irq_handler(uint32_t *svc_args)
1061 {
1062  struct tfm_state_context_t *svc_ctx =
1063  (struct tfm_state_context_t *)svc_args;
1064  psa_signal_t irq_signal = svc_ctx->r0;
1065  uint32_t running_partition_idx =
1066  tfm_spm_partition_get_running_partition_idx();
1067  uint32_t running_partition_id =
1068  tfm_spm_partition_get_partition_id(running_partition_idx);
1069  IRQn_Type irq_line;
1070 
1071  /* Only a single signal is allowed */
1072  if (!tfm_is_one_bit_set(irq_signal)) {
1073  /* FixMe: error severity TBD */
1074  tfm_secure_api_error_handler();
1075  }
1076 
1077  irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
1078 
1079  if (irq_line < 0) {
1080  /* FixMe: error severity TBD */
1081  tfm_secure_api_error_handler();
1082  }
1083 
1084  tfm_spm_hal_disable_irq(irq_line);
1085 }
1086 
1087 void tfm_spm_psa_wait(uint32_t *svc_args)
1088 {
1089  /* Look for partition that is ready for run */
1090  struct tfm_state_context_t *svc_ctx =
1091  (struct tfm_state_context_t *)svc_args;
1092  uint32_t running_partition_idx;
1093  const struct spm_partition_runtime_data_t *curr_part_data;
1094 
1095  psa_signal_t signal_mask = svc_ctx->r0;
1096  uint32_t timeout = svc_ctx->r1;
1097 
1098  /*
1099  * Timeout[30:0] are reserved for future use.
1100  * SPM must ignore the value of RES.
1101  */
1102  timeout &= PSA_TIMEOUT_MASK;
1103 
1104  running_partition_idx = tfm_spm_partition_get_running_partition_idx();
1105  curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);
1106 
1107  if (timeout == PSA_BLOCK) {
1108  /* FIXME: Scheduling is not available in library model, and busy wait is
1109  * also not possible as this code is running in SVC context, and it
1110  * cannot be pre-empted by interrupts. So do nothing here for now
1111  */
1112  (void) signal_mask;
1113  }
1114 
1115  svc_ctx->r0 = curr_part_data->signal_mask;
1116 }
1117 
1118 void tfm_spm_psa_eoi(uint32_t *svc_args)
1119 {
1120  struct tfm_state_context_t *svc_ctx =
1121  (struct tfm_state_context_t *)svc_args;
1122  psa_signal_t irq_signal = svc_ctx->r0;
1123  uint32_t signal_mask;
1124  uint32_t running_partition_idx;
1125  uint32_t running_partition_id;
1126  const struct spm_partition_runtime_data_t *curr_part_data;
1127  IRQn_Type irq_line;
1128 
1129  running_partition_idx = tfm_spm_partition_get_running_partition_idx();
1130  running_partition_id =
1131  tfm_spm_partition_get_partition_id(running_partition_idx);
1132  curr_part_data = tfm_spm_partition_get_runtime_data(running_partition_idx);
1133 
1134  /* Only a single signal is allowed */
1135  if (!tfm_is_one_bit_set(irq_signal)) {
1136  tfm_secure_api_error_handler();
1137  }
1138 
1139  irq_line = get_irq_line_for_signal(running_partition_id, irq_signal);
1140 
1141  if (irq_line < 0) {
1142  /* FixMe: error severity TBD */
1143  tfm_secure_api_error_handler();
1144  }
1145 
1146  tfm_spm_hal_clear_pending_irq(irq_line);
1147  tfm_spm_hal_enable_irq(irq_line);
1148 
1149  signal_mask = curr_part_data->signal_mask & ~irq_signal;
1150  tfm_spm_partition_set_signal_mask(running_partition_idx, signal_mask);
1151 }
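For context, the partition-side sequence that ends up in this handler is the usual PSA interrupt flow: wait for the IRQ signal, service the device, then call psa_eoi(), which, as implemented above, clears the pending interrupt, re-enables the line and clears the signal bit. A sketch under the assumption of an invented signal name; psa_wait(), psa_eoi() and PSA_BLOCK are the real PSA APIs from psa/service.h:

#include "psa/service.h"

#define EXAMPLE_IRQ_SIGNAL (1U << 4) /* hypothetical signal bit from the partition manifest */

static void example_irq_service_flow(void)
{
    psa_signal_t signals = psa_wait(EXAMPLE_IRQ_SIGNAL, PSA_BLOCK);

    if (signals & EXAMPLE_IRQ_SIGNAL) {
        /* ...service the peripheral here... */
        psa_eoi(EXAMPLE_IRQ_SIGNAL);
    }
}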
1152 
1153 /*
1154  * This function is called when a secure partition causes an error.
1155  * In case of an error in the error handling, a non-zero value have to be
1156  * In case of an error in the error handling, a non-zero value has to be
1157  */
1158 static void tfm_spm_partition_err_handler(
1159  const struct spm_partition_desc_t *partition,
1160  int32_t err_code)
1161 {
1162  (void)err_code;
1163 
1166 }
1167 
1168 enum spm_err_t tfm_spm_partition_init(void)
1169 {
1170  struct spm_partition_desc_t *part;
1171  struct tfm_sfn_req_s desc;
1172  int32_t args[4] = {0};
1173  int32_t fail_cnt = 0;
1174  uint32_t idx;
1175  const struct tfm_spm_partition_platform_data_t **platform_data_p;
1176 
1177  /* Call the init function for each partition */
1178  for (idx = 0; idx < g_spm_partition_db.partition_count; ++idx) {
1179  part = &g_spm_partition_db.partitions[idx];
1180  platform_data_p = part->platform_data_list;
1181  if (platform_data_p != NULL) {
1182  while ((*platform_data_p) != NULL) {
1183  if (tfm_spm_hal_configure_default_isolation(idx,
1184  *platform_data_p) != TFM_PLAT_ERR_SUCCESS) {
1185  fail_cnt++;
1186  }
1187  ++platform_data_p;
1188  }
1189  }
1190  if (part->static_data->partition_init == NULL) {
1191  tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
1192  tfm_spm_partition_set_caller_partition_idx(idx,
1193  SPM_INVALID_PARTITION_IDX);
1194  } else {
1195  int32_t res;
1196 
1197  desc.args = args;
1198  desc.ns_caller = false;
1199  desc.sfn = (sfn_t)part->static_data->partition_init;
1200  desc.sp_id = part->static_data->partition_id;
1201  res = tfm_core_sfn_request(&desc);
1202  if (res == TFM_SUCCESS) {
1203  tfm_spm_partition_set_state(idx, SPM_PARTITION_STATE_IDLE);
1204  } else {
1205  tfm_spm_partition_err_handler(part, res);
1206  fail_cnt++;
1207  }
1208  }
1209  }
1210 
1211  tfm_spm_secure_api_init_done();
1212 
1213  if (fail_cnt == 0) {
1214  return SPM_ERR_OK;
1215  } else {
1216  return SPM_ERR_PARTITION_NOT_AVAILABLE;
1217  }
1218 }
1219 
1220 void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
1221 {
1222  struct spm_partition_runtime_data_t *runtime_data =
1223  &g_spm_partition_db.partitions[partition_idx].runtime_data;
1224  struct interrupted_ctx_stack_frame_t *stack_frame =
1225  (struct interrupted_ctx_stack_frame_t *)runtime_data->ctx_stack_ptr;
1226 
1227  stack_frame->partition_state = runtime_data->partition_state;
1228 
1229  runtime_data->ctx_stack_ptr +=
1230  sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
1231 }
1232 
1233 void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
1234 {
1235  struct spm_partition_runtime_data_t *runtime_data =
1236  &g_spm_partition_db.partitions[partition_idx].runtime_data;
1237  struct interrupted_ctx_stack_frame_t *stack_frame;
1238 
1239  runtime_data->ctx_stack_ptr -=
1240  sizeof(struct interrupted_ctx_stack_frame_t) / sizeof(uint32_t);
1241 
1242  stack_frame = (struct interrupted_ctx_stack_frame_t *)
1243  runtime_data->ctx_stack_ptr;
1244  tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1245  stack_frame->partition_state = 0;
1246 }
1247 
1248 void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
1249 {
1250  struct spm_partition_runtime_data_t *runtime_data =
1251  &g_spm_partition_db.partitions[partition_idx].runtime_data;
1252  struct handler_ctx_stack_frame_t *stack_frame =
1253  (struct handler_ctx_stack_frame_t *)
1254  runtime_data->ctx_stack_ptr;
1255 
1256  stack_frame->partition_state = runtime_data->partition_state;
1257  stack_frame->caller_partition_idx = runtime_data->caller_partition_idx;
1258 
1259  runtime_data->ctx_stack_ptr +=
1260  sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1261 }
1262 
1263 void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
1264 {
1265  struct spm_partition_runtime_data_t *runtime_data =
1266  &g_spm_partition_db.partitions[partition_idx].runtime_data;
1267  struct handler_ctx_stack_frame_t *stack_frame;
1268 
1269  runtime_data->ctx_stack_ptr -=
1270  sizeof(struct handler_ctx_stack_frame_t) / sizeof(uint32_t);
1271 
1272  stack_frame = (struct handler_ctx_stack_frame_t *)
1273  runtime_data->ctx_stack_ptr;
1274 
1275  tfm_spm_partition_set_state(partition_idx, stack_frame->partition_state);
1276  stack_frame->partition_state = 0;
1278  partition_idx, stack_frame->caller_partition_idx);
1279  stack_frame->caller_partition_idx = 0;
1280 }
1281 
1282 void tfm_spm_partition_store_context(uint32_t partition_idx,
1283  uint32_t stack_ptr, uint32_t lr)
1284 {
1285  g_spm_partition_db.partitions[partition_idx].
1286  runtime_data.stack_ptr = stack_ptr;
1287  g_spm_partition_db.partitions[partition_idx].
1288  runtime_data.lr = lr;
1289 }
1290 
1291 const struct spm_partition_runtime_data_t *
1292  tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
1293 {
1294  return &(g_spm_partition_db.partitions[partition_idx].runtime_data);
1295 }
1296 
1297 void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
1298 {
1299  g_spm_partition_db.partitions[partition_idx].runtime_data.partition_state =
1300  state;
1301  if (state == SPM_PARTITION_STATE_RUNNING ||
1302  state == SPM_PARTITION_STATE_HANDLING_IRQ) {
1303  g_spm_partition_db.running_partition_idx = partition_idx;
1304  }
1305 }
1306 
1307 void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx,
1308  uint32_t caller_partition_idx)
1309 {
1310  g_spm_partition_db.partitions[partition_idx].runtime_data.
1311  caller_partition_idx = caller_partition_idx;
1312 }
1313 
1314 void tfm_spm_partition_set_signal_mask(uint32_t partition_idx,
1315  uint32_t signal_mask)
1316 {
1317  g_spm_partition_db.partitions[partition_idx].runtime_data.
1318  signal_mask = signal_mask;
1319 }
1320 
1321 void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx,
1322  int32_t caller_client_id)
1323 {
1324  g_spm_partition_db.partitions[partition_idx].runtime_data.
1325  caller_client_id = caller_client_id;
1326 }
1327 
1328 enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx,
1329  const int32_t *args)
1330 {
1331  struct spm_partition_runtime_data_t *runtime_data =
1332  &g_spm_partition_db.partitions[partition_idx].runtime_data;
1333  size_t i;
1334 
1335  if ((args[1] < 0) || (args[3] < 0)) {
1336  return SPM_ERR_INVALID_PARAMETER;
1337  }
1338 
1339  runtime_data->iovec_args.in_len = (size_t)args[1];
1340  for (i = 0U; i < runtime_data->iovec_args.in_len; ++i) {
1341  runtime_data->iovec_args.in_vec[i].base =
1342  ((psa_invec *)args[0])[i].base;
1343  runtime_data->iovec_args.in_vec[i].len = ((psa_invec *)args[0])[i].len;
1344  }
1345  runtime_data->iovec_args.out_len = (size_t)args[3];
1346  for (i = 0U; i < runtime_data->iovec_args.out_len; ++i) {
1347  runtime_data->iovec_args.out_vec[i].base =
1348  ((psa_outvec *)args[2])[i].base;
1349  runtime_data->iovec_args.out_vec[i].len =
1350  ((psa_outvec *)args[2])[i].len;
1351  }
1352  runtime_data->orig_outvec = (psa_outvec *)args[2];
1353 
1354  return SPM_ERR_OK;
1355 }
1356 
1357 uint32_t tfm_spm_partition_get_running_partition_idx(void)
1358 {
1359  return g_spm_partition_db.running_partition_idx;
1360 }
1361 
1362 void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
1363 {
1364  struct spm_partition_desc_t *partition =
1365  &(g_spm_partition_db.partitions[partition_idx]);
1366  int32_t i;
1367 
1369  partition->runtime_data.iovec_args.in_len = 0;
1370  for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1371  partition->runtime_data.iovec_args.in_vec[i].base = 0;
1372  partition->runtime_data.iovec_args.in_vec[i].len = 0;
1373  }
1374  partition->runtime_data.iovec_args.out_len = 0;
1375  for (i = 0; i < PSA_MAX_IOVEC; ++i) {
1376  partition->runtime_data.iovec_args.out_vec[i].base = 0;
1377  partition->runtime_data.iovec_args.out_vec[i].len = 0;
1378  }
1379  partition->runtime_data.orig_outvec = 0;
1380 }
1381 
1382 void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
1383 {
1384  uint32_t *res_ptr = (uint32_t *)&svc_ctx->r0;
1385  uint32_t running_partition_flags = 0;
1386  uint32_t running_partition_idx;
1387 
1388  /* Check permissions on request type basis */
1389 
1390  switch (svc_ctx->r0) {
1391  case TFM_SPM_REQUEST_RESET_VOTE:
1392  running_partition_idx =
1393  tfm_spm_partition_get_running_partition_idx();
1394  running_partition_flags = tfm_spm_partition_get_flags(
1395  running_partition_idx);
1396 
1397  /* Currently only PSA Root of Trust services are allowed to make Reset
1398  * vote request
1399  */
1400  if ((running_partition_flags & SPM_PART_FLAG_PSA_ROT) == 0) {
1401  *res_ptr = (uint32_t)TFM_ERROR_GENERIC;
1402  }
1403 
1404  /* FixMe: this is a placeholder for checks to be performed before
1405  * allowing execution of reset
1406  */
1407  *res_ptr = (uint32_t)TFM_SUCCESS;
1408 
1409  break;
1410  default:
1411  *res_ptr = (uint32_t)TFM_ERROR_INVALID_PARAMETER;
1412  }
1413 }
1414 
1415 enum spm_err_t tfm_spm_db_init(void)
1416 {
1417  uint32_t i;
1418 
1419  /* This function initialises partition db */
1420 
1421  /* For the non-secure execution environment */
1422  tfm_nspm_configure_clients();
1423 
1424  for (i = 0; i < g_spm_partition_db.partition_count; i++) {
1432  ctx_stack_list[i];
1436  }
1437  g_spm_partition_db.is_init = 1;
1438 
1439  return SPM_ERR_OK;
1440 }
#define SPM_PARTITION_STATE_CLOSED
Definition: spm_func.h:22
#define TFM_PARTITION_UNPRIVILEGED_MODE
Definition: spm_func.h:32
void tfm_spm_partition_push_interrupted_ctx(uint32_t partition_idx)
Save interrupted partition context on ctx stack.
Definition: spm_func.c:1220
void tfm_spm_secure_api_init_done(void)
Signal that secure partition initialisation is finished.
Definition: spm_func.c:730
void tfm_spm_psa_eoi(uint32_t *svc_args)
Handle request to record IRQ processed.
Definition: spm_func.c:1118
tfm_status_e
Definition: tfm_api.h:45
uint32_t psa_signal_t
Definition: service.h:50
__STATIC_INLINE void tfm_arch_set_psplim(uint32_t psplim)
Set PSP limit value.
void * base
Definition: client.h:75
#define SPM_PARTITION_STATE_UNINIT
Definition: spm_func.h:16
const struct tfm_core_irq_signal_data_t tfm_core_irq_signals[]
#define EXC_RETURN_STACK_PROCESS
Definition: tfm_arch_v8m.h:23
#define PSA_BLOCK
Definition: service.h:31
const struct tfm_spm_partition_platform_data_t ** platform_data_list_list[]
uint32_t tfm_spm_partition_return_handler(uint32_t lr)
Called when secure service returns.
Definition: spm_func.c:949
uint32_t sp_id
size_t in_len
Definition: spm_func.h:60
#define EXC_RETURN_SECURE_HANDLER
Definition: spm_func.c:30
enum spm_err_t tfm_spm_db_init(void)
Initialize partition database.
Definition: spm_func.c:1415
void tfm_secure_api_error_handler(void)
size_t len
Definition: client.h:68
int32_t(* sfn_t)(int32_t, int32_t, int32_t, int32_t)
const struct tfm_spm_partition_platform_data_t ** platform_data_list
Definition: spm_func.h:122
void tfm_spm_psa_wait(uint32_t *svc_args)
Handle signal wait request.
Definition: spm_func.c:1087
uint32_t tfm_spm_depriv_req_handler(uint32_t *svc_args, uint32_t excReturn)
Handle deprivileged request.
Definition: spm_func.c:921
#define EXC_RETURN_SECURE_FUNCTION
Definition: spm_func.c:29
#define SPM_PARTITION_STATE_IDLE
Definition: spm_func.h:17
REGION_DECLARE_T(Image$$, TFM_SECURE_STACK, $$ZI$$Base, uint32_t)
void tfm_spm_partition_push_handler_ctx(uint32_t partition_idx)
Save handler partition context on ctx stack.
Definition: spm_func.c:1248
int32_t tfm_core_sfn_request(const struct tfm_sfn_req_s *desc_ptr)
struct spm_partition_runtime_data_t runtime_data
Definition: spm_func.h:119
size_t out_len
Definition: spm_func.h:63
#define SPM_INVALID_PARTITION_IDX
Definition: spm_func.h:29
psa_outvec * orig_outvec
Definition: spm_func.h:92
void tfm_spm_disable_irq_handler(uint32_t *svc_args)
Handle IRQ disable request.
Definition: spm_func.c:1060
sp_entry_point partition_init
Definition: spm_func.h:109
void tfm_spm_partition_set_caller_client_id(uint32_t partition_idx, int32_t caller_client_id)
Set the caller client ID for a given partition.
Definition: spm_func.c:1321
enum tfm_status_e tfm_core_has_read_access_to_region(const void *p, size_t s, bool ns_caller, uint32_t privileged)
Check whether the current partition has read access to a memory range.
void tfm_spm_partition_pop_interrupted_ctx(uint32_t partition_idx)
Restores interrupted partition context on ctx stack.
Definition: spm_func.c:1233
void tfm_spm_partition_pop_handler_ctx(uint32_t partition_idx)
Restores handler partition context on ctx stack.
Definition: spm_func.c:1263
void tfm_spm_request_handler(const struct tfm_state_context_t *svc_ctx)
Handle an SPM request by a secure service.
Definition: spm_func.c:1382
#define SPM_PARTITION_STATE_HANDLING_IRQ
Definition: spm_func.h:19
const struct spm_partition_static_data_t static_data_list[]
psa_outvec out_vec[PSA_MAX_IOVEC]
Definition: spm_func.h:62
uint32_t tfm_spm_depriv_return_handler(uint32_t *irq_svc_args, uint32_t lr)
Handle request to return to privileged.
Definition: spm_func.c:976
const struct spm_partition_static_data_t * static_data
Definition: spm_func.h:120
#define PSA_MAX_IOVEC
Definition: client.h:54
uint32_t tfm_spm_partition_request_svc_handler(const uint32_t *svc_ctx, uint32_t excReturn)
Called if veneer is running in thread mode.
Definition: spm_func.c:894
#define ERROR_MSG(msg)
Definition: utilities.h:36
void tfm_spm_get_caller_client_id_handler(uint32_t *svc_args)
Stores caller's client id in state context.
Definition: spm_func.c:851
uint32_t running_partition_idx
Definition: spm_func.h:128
enum spm_err_t tfm_spm_partition_init(void)
Execute partition init function.
Definition: spm_func.c:1168
Runtime context information of a partition.
Definition: spm_func.h:85
bool tfm_is_partition_privileged(uint32_t partition_idx)
Return whether a secure partition is privileged.
Definition: spm_func.c:722
uint32_t partition_count
Definition: spm_func.h:127
enum tfm_status_e tfm_core_has_write_access_to_region(const void *p, size_t s, bool ns_caller, uint32_t privileged)
Check whether the current partition has write access to a memory range.
#define SPM_PART_FLAG_PSA_ROT
Definition: spm_func.h:36
int32_t tfm_nspm_get_current_client_id(void)
Get the client ID of the current NS client.
Definition: tfm_nspm_func.c:63
void tfm_spm_enable_irq_handler(uint32_t *svc_args)
Handle IRQ enable request.
Definition: spm_func.c:1033
int32_t tfm_spm_sfn_request_thread_mode(struct tfm_sfn_req_s *desc_ptr)
Definition: spm_func.c:780
const size_t tfm_core_irq_signals_count
#define SPM_PART_FLAG_APP_ROT
Definition: spm_func.h:35
struct spm_partition_db_t g_spm_partition_db
spm_err_t
Definition: spm_func.h:44
int32_t * args
uint32_t caller_part_idx
int32_t tfm_spm_check_buffer_access(uint32_t partition_idx, void *start_addr, size_t len, uint32_t alignment)
Check whether a buffer is ok for writing to by the privileged API function.
Definition: spm_func.c:818
#define TFM_PARTITION_PRIVILEGED_MODE
Definition: spm_func.h:33
void tfm_spm_partition_set_state(uint32_t partition_idx, uint32_t state)
Set the current state of a partition.
Definition: spm_func.c:1297
enum spm_err_t tfm_spm_partition_set_iovec(uint32_t partition_idx, const int32_t *args)
Set the iovec parameters for the partition.
Definition: spm_func.c:1328
enum tfm_status_e tfm_spm_sfn_request_handler(struct tfm_sfn_req_s *desc_ptr, uint32_t excReturn)
Definition: spm_func.c:735
#define TFM_SP_CORE_ID
void tfm_spm_seal_psp_stacks(void)
Function to seal the PSP stacks for Function mode.
Definition: spm_func.c:51
void tfm_spm_partition_cleanup_context(uint32_t partition_idx)
Clears the context info from the database for a partition.
Definition: spm_func.c:1362
uint32_t tfm_spm_partition_get_partition_id(uint32_t partition_idx)
Get the id of the partition for its index from the db.
Definition: spm_func.c:707
#define SPM_PARTITION_STATE_BLOCKED
Definition: spm_func.h:21
void tfm_spm_partition_store_context(uint32_t partition_idx, uint32_t stack_ptr, uint32_t lr)
Save stack pointer and link register for partition in database.
Definition: spm_func.c:1282
size_t len
Definition: client.h:76
void tfm_spm_partition_set_caller_partition_idx(uint32_t partition_idx, uint32_t caller_partition_idx)
Set the caller partition index for a given partition.
Definition: spm_func.c:1307
void tfm_spm_partition_set_signal_mask(uint32_t partition_idx, uint32_t signal_mask)
Set the signal mask for a given partition.
Definition: spm_func.c:1314
#define TFM_STACK_SEAL_VALUE
struct iovec_args_t iovec_args
Definition: spm_func.h:91
void tfm_nspm_configure_clients(void)
initialise the NS context database
Definition: tfm_nspm_func.c:48
#define INVALID_PARTITION_ID
uint32_t tfm_spm_partition_get_running_partition_idx(void)
Returns the index of the partition that has running state.
Definition: spm_func.c:1357
uint32_t is_init
Definition: spm_func.h:126
struct spm_partition_desc_t * partitions
Definition: spm_func.h:129
uint32_t tfm_spm_partition_get_privileged_mode(uint32_t partition_flags)
Get the current partition mode.
Definition: spm_func.c:713
const struct spm_partition_runtime_data_t * tfm_spm_partition_get_runtime_data(uint32_t partition_idx)
Get the current runtime data of a partition.
Definition: spm_func.c:1292
const void * base
Definition: client.h:67
#define PSA_TIMEOUT_MASK
Definition: tfm_api.h:41
#define TFM_INVALID_CLIENT_ID
Definition: tfm_api.h:18
Holds the iovec parameters that are passed to a service.
Definition: spm_func.h:58
uint32_t * ctx_stack_list[]
#define SPM_PARTITION_STATE_RUNNING
Definition: spm_func.h:18
psa_invec in_vec[PSA_MAX_IOVEC]
Definition: spm_func.h:59
bool tfm_is_one_bit_set(uint32_t n)
Definition: utilities.c:24
uint32_t caller_partition_idx
Definition: spm_func.h:79
#define SPM_PARTITION_STATE_SUSPENDED
Definition: spm_func.h:20