gna_config.hpp
// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief A header that defines advanced related properties for the GNA plugin.
 * These properties should be used in the SetConfig() and LoadNetwork() methods of plugins
 *
 * @file gna_config.hpp
 */

#pragma once

#include <string>
#include "../ie_plugin_config.hpp"

namespace InferenceEngine {

namespace GNAConfigParams {

#define GNA_CONFIG_KEY(name) InferenceEngine::GNAConfigParams::_CONFIG_KEY(GNA_##name)
#define GNA_CONFIG_VALUE(name) InferenceEngine::GNAConfigParams::GNA_##name

#define DECLARE_GNA_CONFIG_KEY(name) DECLARE_CONFIG_KEY(GNA_##name)
#define DECLARE_GNA_CONFIG_VALUE(name) DECLARE_CONFIG_VALUE(GNA_##name)

/**
* @brief Scale factor calculated by the user, used to enable the static quantisation feature.
* This option should be set to a floating point value serialized to a string with . (dot) as the decimal separator
*/
DECLARE_GNA_CONFIG_KEY(SCALE_FACTOR);

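// A minimal usage sketch (assumes an InferenceEngine::InferencePlugin `plugin` and a
// CNNNetwork `network` created elsewhere; the scale factor value itself is illustrative):
//
//     std::map<std::string, std::string> config = {
//         { GNA_CONFIG_KEY(SCALE_FACTOR), "2048.0" }  // dot as the decimal separator
//     };
//     auto executableNetwork = plugin.LoadNetwork(network, config);
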
/**
* @brief By default the GNA API works in Int16 precision; however, this can be adjusted if necessary.
* Currently supported values are I16 and I8
*/
DECLARE_GNA_CONFIG_KEY(PRECISION);

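// For example (a sketch, reusing the `config` map from the sketch above):
//
//     config[GNA_CONFIG_KEY(PRECISION)] = "I8";  // or "I16" (the default)
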
/**
* @brief If turned on, dump the GNA firmware model into the specified file
*/
DECLARE_GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE);

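// For example (the file name is illustrative):
//
//     config[GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE)] = "gna_model.bin";
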
/**
* @brief GNA proc_type setting that should be one of GNA_AUTO, GNA_HW, GNA_SW, GNA_SW_EXACT
*/
DECLARE_GNA_CONFIG_KEY(DEVICE_MODE);

DECLARE_GNA_CONFIG_VALUE(AUTO);
DECLARE_GNA_CONFIG_VALUE(HW);
DECLARE_GNA_CONFIG_VALUE(SW);
DECLARE_GNA_CONFIG_VALUE(SW_EXACT);

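// For example, to request bit-exact software emulation of the GNA device (a sketch using
// the value constants declared above):
//
//     config[GNA_CONFIG_KEY(DEVICE_MODE)] = GNA_CONFIG_VALUE(SW_EXACT);
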
/**
* @brief If enabled, produces a minimum memory footprint for the loaded network in GNA memory; default value is YES
*/
DECLARE_GNA_CONFIG_KEY(COMPACT_MODE);

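// For example, to disable compact mode (a sketch, assuming the YES/NO value constants
// declared in ie_plugin_config.hpp):
//
//     config[GNA_CONFIG_KEY(COMPACT_MODE)] = CONFIG_VALUE(NO);
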
/**
* @brief The option to enable/disable the uniformly distributed PWL algorithm.
* By default (when the value is NO) the optimized algorithm called "Recursive Descent Algorithm for Finding
* the Optimal Minimax Piecewise Linear Approximation of Convex Functions" is used.
* If the value is YES, a simple uniform distribution is used to create the PWL approximation of activation functions.
* Uniform distribution usually gives a poorer approximation for the same number of segments
*/
DECLARE_GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN);

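// For example, to switch to the uniformly distributed PWL design (a sketch):
//
//     config[GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN)] = CONFIG_VALUE(YES);
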
/**
* @brief By default, the GNA plugin uses one worker thread for inference computations.
* This parameter allows you to create up to 127 threads for software modes.
*
* Note that multithreading mode does not guarantee the same computation order as the order
* of issuing. Additionally, in this case, software modes do not implement any serialization.
*/
DECLARE_GNA_CONFIG_KEY(LIB_N_THREADS);
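// For example, to run software inference with four worker threads (the count is illustrative
// and must be serialized to a string):
//
//     config[GNA_CONFIG_KEY(LIB_N_THREADS)] = "4";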
}  // namespace GNAConfigParams
}  // namespace InferenceEngine