@@ -45,6 +45,12 @@ enum {
45
45
dma_debug_coherent ,
46
46
};
47
47
48
/*
 * Per-mapping state tracking whether the driver checked the result of a
 * DMA mapping operation with dma_mapping_error().
 */
enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,	/* mapping type has no error check */
	MAP_ERR_NOT_CHECKED,		/* mapped, result not yet checked */
	MAP_ERR_CHECKED,		/* driver called dma_mapping_error() */
};
48
54
#define DMA_DEBUG_STACKTRACE_ENTRIES 5
49
55
50
56
struct dma_debug_entry {
@@ -57,6 +63,7 @@ struct dma_debug_entry {
57
63
int direction ;
58
64
int sg_call_ents ;
59
65
int sg_mapped_ents ;
66
+ enum map_err_types map_err_type ;
60
67
#ifdef CONFIG_STACKTRACE
61
68
struct stack_trace stacktrace ;
62
69
unsigned long st_entries [DMA_DEBUG_STACKTRACE_ENTRIES ];
@@ -114,6 +121,12 @@ static struct device_driver *current_driver __read_mostly;
114
121
115
122
static DEFINE_RWLOCK (driver_name_lock );
116
123
124
/* Human-readable strings for each map_err_types state, indexed by enum value. */
static const char * const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};
117
130
static const char * type2name [4 ] = { "single" , "page" ,
118
131
"scather-gather" , "coherent" };
119
132
@@ -376,11 +389,12 @@ void debug_dma_dump_mappings(struct device *dev)
376
389
list_for_each_entry (entry , & bucket -> list , list ) {
377
390
if (!dev || dev == entry -> dev ) {
378
391
dev_info (entry -> dev ,
379
- "%s idx %d P=%Lx D=%Lx L=%Lx %s\n" ,
392
+ "%s idx %d P=%Lx D=%Lx L=%Lx %s %s \n" ,
380
393
type2name [entry -> type ], idx ,
381
394
(unsigned long long )entry -> paddr ,
382
395
entry -> dev_addr , entry -> size ,
383
- dir2name [entry -> direction ]);
396
+ dir2name [entry -> direction ],
397
+ maperr2str [entry -> map_err_type ]);
384
398
}
385
399
}
386
400
@@ -838,13 +852,28 @@ static __init int dma_debug_entries_cmdline(char *str)
838
852
__setup ("dma_debug=" , dma_debug_cmdline );
839
853
__setup ("dma_debug_entries=" , dma_debug_entries_cmdline );
840
854
855
+ /* Calling dma_mapping_error() from dma-debug api will result in calling
856
+ debug_dma_mapping_error() - need internal mapping error routine to
857
+ avoid debug checks */
858
+ #ifndef DMA_ERROR_CODE
859
+ #define DMA_ERROR_CODE 0
860
+ #endif
861
+ static inline int has_mapping_error (struct device * dev , dma_addr_t dma_addr )
862
+ {
863
+ const struct dma_map_ops * ops = get_dma_ops (dev );
864
+ if (ops -> mapping_error )
865
+ return ops -> mapping_error (dev , dma_addr );
866
+
867
+ return (dma_addr == DMA_ERROR_CODE );
868
+ }
869
+
841
870
static void check_unmap (struct dma_debug_entry * ref )
842
871
{
843
872
struct dma_debug_entry * entry ;
844
873
struct hash_bucket * bucket ;
845
874
unsigned long flags ;
846
875
847
- if (dma_mapping_error ( ref -> dev , ref -> dev_addr )) {
876
+ if (unlikely ( has_mapping_error ( ref -> dev , ref -> dev_addr ) )) {
848
877
err_printk (ref -> dev , NULL , "DMA-API: device driver tries "
849
878
"to free an invalid DMA memory address\n" );
850
879
return ;
@@ -910,6 +939,15 @@ static void check_unmap(struct dma_debug_entry *ref)
910
939
dir2name [ref -> direction ]);
911
940
}
912
941
942
+ if (entry -> map_err_type == MAP_ERR_NOT_CHECKED ) {
943
+ err_printk (ref -> dev , entry ,
944
+ "DMA-API: device driver failed to check map error"
945
+ "[device address=0x%016llx] [size=%llu bytes] "
946
+ "[mapped as %s]" ,
947
+ ref -> dev_addr , ref -> size ,
948
+ type2name [entry -> type ]);
949
+ }
950
+
913
951
hash_bucket_del (entry );
914
952
dma_entry_free (entry );
915
953
@@ -1017,7 +1055,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1017
1055
if (unlikely (global_disable ))
1018
1056
return ;
1019
1057
1020
- if (unlikely (dma_mapping_error (dev , dma_addr )))
1058
+ if (unlikely (has_mapping_error (dev , dma_addr )))
1021
1059
return ;
1022
1060
1023
1061
entry = dma_entry_alloc ();
@@ -1030,6 +1068,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1030
1068
entry -> dev_addr = dma_addr ;
1031
1069
entry -> size = size ;
1032
1070
entry -> direction = direction ;
1071
+ entry -> map_err_type = MAP_ERR_NOT_CHECKED ;
1033
1072
1034
1073
if (map_single )
1035
1074
entry -> type = dma_debug_single ;
@@ -1045,6 +1084,30 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1045
1084
}
1046
1085
EXPORT_SYMBOL (debug_dma_map_page );
1047
1086
1087
+ void debug_dma_mapping_error (struct device * dev , dma_addr_t dma_addr )
1088
+ {
1089
+ struct dma_debug_entry ref ;
1090
+ struct dma_debug_entry * entry ;
1091
+ struct hash_bucket * bucket ;
1092
+ unsigned long flags ;
1093
+
1094
+ if (unlikely (global_disable ))
1095
+ return ;
1096
+
1097
+ ref .dev = dev ;
1098
+ ref .dev_addr = dma_addr ;
1099
+ bucket = get_hash_bucket (& ref , & flags );
1100
+ entry = bucket_find_exact (bucket , & ref );
1101
+
1102
+ if (!entry )
1103
+ goto out ;
1104
+
1105
+ entry -> map_err_type = MAP_ERR_CHECKED ;
1106
+ out :
1107
+ put_hash_bucket (bucket , & flags );
1108
+ }
1109
+ EXPORT_SYMBOL (debug_dma_mapping_error );
1110
+
1048
1111
void debug_dma_unmap_page (struct device * dev , dma_addr_t addr ,
1049
1112
size_t size , int direction , bool map_single )
1050
1113
{
0 commit comments