@@ -35,9 +35,9 @@ struct heap_control {
35
35
struct sbi_dlist used_space_list ;
36
36
};
37
37
38
- static struct heap_control hpctrl ;
38
+ static struct heap_control global_hpctrl ;
39
39
40
- void * sbi_malloc ( size_t size )
40
+ void * sbi_malloc_from ( struct heap_control * hpctrl , size_t size )
41
41
{
42
42
void * ret = NULL ;
43
43
struct heap_node * n , * np ;
@@ -48,81 +48,91 @@ void *sbi_malloc(size_t size)
48
48
size += HEAP_ALLOC_ALIGN - 1 ;
49
49
size &= ~((unsigned long )HEAP_ALLOC_ALIGN - 1 );
50
50
51
- spin_lock (& hpctrl . lock );
51
+ spin_lock (& hpctrl -> lock );
52
52
53
53
np = NULL ;
54
- sbi_list_for_each_entry (n , & hpctrl . free_space_list , head ) {
54
+ sbi_list_for_each_entry (n , & hpctrl -> free_space_list , head ) {
55
55
if (size <= n -> size ) {
56
56
np = n ;
57
57
break ;
58
58
}
59
59
}
60
60
if (np ) {
61
61
if ((size < np -> size ) &&
62
- !sbi_list_empty (& hpctrl . free_node_list )) {
63
- n = sbi_list_first_entry (& hpctrl . free_node_list ,
62
+ !sbi_list_empty (& hpctrl -> free_node_list )) {
63
+ n = sbi_list_first_entry (& hpctrl -> free_node_list ,
64
64
struct heap_node , head );
65
65
sbi_list_del (& n -> head );
66
66
n -> addr = np -> addr + np -> size - size ;
67
67
n -> size = size ;
68
68
np -> size -= size ;
69
- sbi_list_add_tail (& n -> head , & hpctrl . used_space_list );
69
+ sbi_list_add_tail (& n -> head , & hpctrl -> used_space_list );
70
70
ret = (void * )n -> addr ;
71
71
} else if (size == np -> size ) {
72
72
sbi_list_del (& np -> head );
73
- sbi_list_add_tail (& np -> head , & hpctrl . used_space_list );
73
+ sbi_list_add_tail (& np -> head , & hpctrl -> used_space_list );
74
74
ret = (void * )np -> addr ;
75
75
}
76
76
}
77
77
78
- spin_unlock (& hpctrl . lock );
78
+ spin_unlock (& hpctrl -> lock );
79
79
80
80
return ret ;
81
81
}
82
82
83
- void * sbi_zalloc (size_t size )
83
+ void * sbi_malloc (size_t size )
84
84
{
85
- void * ret = sbi_malloc (size );
85
+ return sbi_malloc_from (& global_hpctrl , size );
86
+ }
87
+
88
/**
 * Allocate zero-initialized memory from the given heap instance.
 *
 * @param hpctrl heap instance to allocate from
 * @param size number of bytes requested
 * @return pointer to zeroed memory, or NULL on failure
 */
void *sbi_zalloc_from(struct heap_control *hpctrl, size_t size)
{
	void *mem = sbi_malloc_from(hpctrl, size);

	if (!mem)
		return NULL;

	sbi_memset(mem, 0, size);
	return mem;
}
91
96
92
- void sbi_free (void * ptr )
97
+ void * sbi_zalloc (size_t size )
98
+ {
99
+ return sbi_malloc_from (& global_hpctrl , size );
100
+ }
101
+
102
+ void sbi_free_from (struct heap_control * hpctrl , void * ptr )
93
103
{
94
104
struct heap_node * n , * np ;
95
105
96
106
if (!ptr )
97
107
return ;
98
108
99
- spin_lock (& hpctrl . lock );
109
+ spin_lock (& hpctrl -> lock );
100
110
101
111
np = NULL ;
102
- sbi_list_for_each_entry (n , & hpctrl . used_space_list , head ) {
112
+ sbi_list_for_each_entry (n , & hpctrl -> used_space_list , head ) {
103
113
if ((n -> addr <= (unsigned long )ptr ) &&
104
114
((unsigned long )ptr < (n -> addr + n -> size ))) {
105
115
np = n ;
106
116
break ;
107
117
}
108
118
}
109
119
if (!np ) {
110
- spin_unlock (& hpctrl . lock );
120
+ spin_unlock (& hpctrl -> lock );
111
121
return ;
112
122
}
113
123
114
124
sbi_list_del (& np -> head );
115
125
116
- sbi_list_for_each_entry (n , & hpctrl . free_space_list , head ) {
126
+ sbi_list_for_each_entry (n , & hpctrl -> free_space_list , head ) {
117
127
if ((np -> addr + np -> size ) == n -> addr ) {
118
128
n -> addr = np -> addr ;
119
129
n -> size += np -> size ;
120
- sbi_list_add_tail (& np -> head , & hpctrl . free_node_list );
130
+ sbi_list_add_tail (& np -> head , & hpctrl -> free_node_list );
121
131
np = NULL ;
122
132
break ;
123
133
} else if (np -> addr == (n -> addr + n -> size )) {
124
134
n -> size += np -> size ;
125
- sbi_list_add_tail (& np -> head , & hpctrl . free_node_list );
135
+ sbi_list_add_tail (& np -> head , & hpctrl -> free_node_list );
126
136
np = NULL ;
127
137
break ;
128
138
} else if ((n -> addr + n -> size ) < np -> addr ) {
@@ -132,73 +142,107 @@ void sbi_free(void *ptr)
132
142
}
133
143
}
134
144
if (np )
135
- sbi_list_add_tail (& np -> head , & hpctrl . free_space_list );
145
+ sbi_list_add_tail (& np -> head , & hpctrl -> free_space_list );
136
146
137
- spin_unlock (& hpctrl . lock );
147
+ spin_unlock (& hpctrl -> lock );
138
148
}
139
149
140
- unsigned long sbi_heap_free_space (void )
150
+ void sbi_free (void * ptr )
151
+ {
152
+ return sbi_free_from (& global_hpctrl , ptr );
153
+ }
154
+
155
+ unsigned long sbi_heap_free_space_from (struct heap_control * hpctrl )
141
156
{
142
157
struct heap_node * n ;
143
158
unsigned long ret = 0 ;
144
159
145
- spin_lock (& hpctrl . lock );
146
- sbi_list_for_each_entry (n , & hpctrl . free_space_list , head )
160
+ spin_lock (& hpctrl -> lock );
161
+ sbi_list_for_each_entry (n , & hpctrl -> free_space_list , head )
147
162
ret += n -> size ;
148
- spin_unlock (& hpctrl . lock );
163
+ spin_unlock (& hpctrl -> lock );
149
164
150
165
return ret ;
151
166
}
152
167
168
+ unsigned long sbi_heap_free_space (void )
169
+ {
170
+ return sbi_heap_free_space_from (& global_hpctrl );
171
+ }
172
+
173
+ unsigned long sbi_heap_used_space_from (struct heap_control * hpctrl )
174
+ {
175
+ return hpctrl -> size - hpctrl -> hksize - sbi_heap_free_space ();
176
+ }
177
+
153
178
unsigned long sbi_heap_used_space (void )
154
179
{
155
- return hpctrl .size - hpctrl .hksize - sbi_heap_free_space ();
180
+ return sbi_heap_free_space_from (& global_hpctrl );
181
+ }
182
+
183
+ unsigned long sbi_heap_reserved_space_from (struct heap_control * hpctrl )
184
+ {
185
+ return hpctrl -> hksize ;
156
186
}
157
187
158
188
unsigned long sbi_heap_reserved_space (void )
159
189
{
160
- return hpctrl . hksize ;
190
+ return sbi_heap_free_space_from ( & global_hpctrl ) ;
161
191
}
162
192
163
- int sbi_heap_init (struct sbi_scratch * scratch )
193
+ int sbi_heap_init_new (struct heap_control * hpctrl , unsigned long base ,
194
+ unsigned long size )
164
195
{
165
196
unsigned long i ;
166
197
struct heap_node * n ;
167
198
168
- /* Sanity checks on heap offset and size */
169
- if (!scratch -> fw_heap_size ||
170
- (scratch -> fw_heap_size & (HEAP_BASE_ALIGN - 1 )) ||
171
- (scratch -> fw_heap_offset < scratch -> fw_rw_offset ) ||
172
- (scratch -> fw_size < (scratch -> fw_heap_offset + scratch -> fw_heap_size )) ||
173
- (scratch -> fw_heap_offset & (HEAP_BASE_ALIGN - 1 )))
174
- return SBI_EINVAL ;
175
-
176
199
/* Initialize heap control */
177
- SPIN_LOCK_INIT (hpctrl . lock );
178
- hpctrl . base = scratch -> fw_start + scratch -> fw_heap_offset ;
179
- hpctrl . size = scratch -> fw_heap_size ;
180
- hpctrl . hkbase = hpctrl . base ;
181
- hpctrl . hksize = hpctrl . size / HEAP_HOUSEKEEPING_FACTOR ;
182
- hpctrl . hksize &= ~((unsigned long )HEAP_BASE_ALIGN - 1 );
183
- SBI_INIT_LIST_HEAD (& hpctrl . free_node_list );
184
- SBI_INIT_LIST_HEAD (& hpctrl . free_space_list );
185
- SBI_INIT_LIST_HEAD (& hpctrl . used_space_list );
200
+ SPIN_LOCK_INIT (hpctrl -> lock );
201
+ hpctrl -> base = base ;
202
+ hpctrl -> size = size ;
203
+ hpctrl -> hkbase = hpctrl -> base ;
204
+ hpctrl -> hksize = hpctrl -> size / HEAP_HOUSEKEEPING_FACTOR ;
205
+ hpctrl -> hksize &= ~((unsigned long )HEAP_BASE_ALIGN - 1 );
206
+ SBI_INIT_LIST_HEAD (& hpctrl -> free_node_list );
207
+ SBI_INIT_LIST_HEAD (& hpctrl -> free_space_list );
208
+ SBI_INIT_LIST_HEAD (& hpctrl -> used_space_list );
186
209
187
210
/* Prepare free node list */
188
- for (i = 0 ; i < (hpctrl . hksize / sizeof (* n )); i ++ ) {
189
- n = (struct heap_node * )(hpctrl . hkbase + (sizeof (* n ) * i ));
211
+ for (i = 0 ; i < (hpctrl -> hksize / sizeof (* n )); i ++ ) {
212
+ n = (struct heap_node * )(hpctrl -> hkbase + (sizeof (* n ) * i ));
190
213
SBI_INIT_LIST_HEAD (& n -> head );
191
214
n -> addr = n -> size = 0 ;
192
- sbi_list_add_tail (& n -> head , & hpctrl . free_node_list );
215
+ sbi_list_add_tail (& n -> head , & hpctrl -> free_node_list );
193
216
}
194
217
195
218
/* Prepare free space list */
196
- n = sbi_list_first_entry (& hpctrl . free_node_list ,
219
+ n = sbi_list_first_entry (& hpctrl -> free_node_list ,
197
220
struct heap_node , head );
198
221
sbi_list_del (& n -> head );
199
- n -> addr = hpctrl . hkbase + hpctrl . hksize ;
200
- n -> size = hpctrl . size - hpctrl . hksize ;
201
- sbi_list_add_tail (& n -> head , & hpctrl . free_space_list );
222
+ n -> addr = hpctrl -> hkbase + hpctrl -> hksize ;
223
+ n -> size = hpctrl -> size - hpctrl -> hksize ;
224
+ sbi_list_add_tail (& n -> head , & hpctrl -> free_space_list );
202
225
203
226
return 0 ;
204
227
}
228
+
229
+ int sbi_heap_init (struct sbi_scratch * scratch )
230
+ {
231
+ /* Sanity checks on heap offset and size */
232
+ if (!scratch -> fw_heap_size ||
233
+ (scratch -> fw_heap_size & (HEAP_BASE_ALIGN - 1 )) ||
234
+ (scratch -> fw_heap_offset < scratch -> fw_rw_offset ) ||
235
+ (scratch -> fw_size < (scratch -> fw_heap_offset + scratch -> fw_heap_size )) ||
236
+ (scratch -> fw_heap_offset & (HEAP_BASE_ALIGN - 1 )))
237
+ return SBI_EINVAL ;
238
+
239
+ return sbi_heap_init_from (& global_hpctrl ,
240
+ scratch -> fw_start + scratch -> fw_heap_offset ,
241
+ scratch -> fw_heap_size );
242
+ }
243
+
244
+ int sbi_heap_alloc_new (struct heap_control * * hpctrl )
245
+ {
246
+ * hpctrl = sbi_calloc (1 , sizeof (struct heap_control ));
247
+ return 0 ;
248
+ }
0 commit comments