GCC Wikia
Advertisement

このページを編集する際は,編集に関する方針に従ってください.

概要

引数

実装

1003 /* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */
1004 
1005 void *
1006 ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
1007                      MEM_STAT_DECL)
1008 {
1009   size_t bin;
1010   size_t csize;
1011   struct small_page_entry *entry;
1012   struct alloc_chunk *chunk, **pp;
1013   void *result;
1014   size_t size = orig_size;
1015 


1016   /* Make sure that zero-sized allocations get a unique and freeable
1017      pointer.  */
1018   if (size == 0)
1019     size = MAX_ALIGNMENT;
1020   else
1021     size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
1022 


1023   /* Try to allocate the object from several different sources.  Each
1024      of these cases is responsible for setting RESULT and SIZE to
1025      describe the allocated block, before jumping to FOUND.  If a
1026      chunk is split, the allocate bit for the new chunk should also be
1027      set.
1028 
1029      Large objects are handled specially.  However, they'll just fail
1030      the next couple of conditions, so we can wait to check for them
1031      below.  The large object case is relatively rare (< 1%), so this
1032      is a win.  */
1033 


1034   /* First try to split the last chunk we allocated.  For best
1035      fragmentation behavior it would be better to look for a
1036      free bin of the appropriate size for a small object.  However,
1037      we're unlikely (1% - 7%) to find one, and this gives better
1038      locality behavior anyway.  This case handles the lion's share
1039      of all calls to this function.  */
1040   if (size <= zone->cached_free_size)
1041     {
1042       result = zone->cached_free;
1043 
1044       zone->cached_free_size -= size;

  • 自由に使える領域が残っているなら

1045       if (zone->cached_free_size)
1046         {
1047           zone->cached_free += size;
1048           zone_set_object_alloc_bit (zone->cached_free);
1049         }
1050 
1051       goto found;
1052     }
1053 


  • 次に、ちょうどいい大きさのビン(bin)がないか探してみる

1054   /* Next, try to find a free bin of the exactly correct size.  */
1055 
1056   /* We want to round SIZE up, rather than down, but we know it's
1057      already aligned to at least FREE_BIN_DELTA, so we can just
1058      shift.  */
1059   bin = SIZE_BIN_DOWN (size);
1060 
1061   if (bin <= NUM_FREE_BINS
1062       && (chunk = zone->free_chunks[bin]) != NULL)
1063     {
1064       /* We have a chunk of the right size.  Pull it off the free list
1065          and use it.  */
1066 
1067       zone->free_chunks[bin] = chunk->next_free;
1068 
1069       /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
1070          == FREE_BIN_DELTA.  */
1071       result = chunk;
1072 
1073       /* The allocation bits are already set correctly.  HIGH_FREE_BIN
1074          may now be wrong, if this was the last chunk in the high bin.
1075          Rather than fixing it up now, wait until we need to search
1076          the free bins.  */
1077 
1078       goto found;
1079     }
1080 



1081   /* Next, if there wasn't a chunk of the ideal size, look for a chunk
1082      to split.  We can find one in the too-big bin, or in the largest
1083      sized bin with a chunk in it.  Try the largest normal-sized bin
1084      first.  */
1085 
1086   if (zone->high_free_bin > bin)
1087     {
1088       /* Find the highest numbered free bin.  It will be at or below
1089          the watermark.  */
1090       while (zone->high_free_bin > bin
1091              && zone->free_chunks[zone->high_free_bin] == NULL)
1092         zone->high_free_bin--;
1093 
1094       if (zone->high_free_bin > bin)
1095         {
1096           size_t tbin = zone->high_free_bin;
1097           chunk = zone->free_chunks[tbin];
1098 
1099           /* Remove the chunk from its previous bin.  */
1100           zone->free_chunks[tbin] = chunk->next_free;
1101 
1102           result = (char *) chunk;
1103 
1104           /* Save the rest of the chunk for future allocation.  */
1105           if (zone->cached_free_size)
1106             free_chunk (zone->cached_free, zone->cached_free_size, zone);
1107 
1108           chunk = (struct alloc_chunk *) ((char *) result + size);
1109           zone->cached_free = (char *) chunk;
1110           zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;
1111 
1112           /* Mark the new free chunk as an object, so that we can
1113              find the size of the newly allocated object.  */
1114           zone_set_object_alloc_bit (chunk);
1115 
1116           /* HIGH_FREE_BIN may now be wrong, if this was the last
1117              chunk in the high bin.  Rather than fixing it up now,
1118              wait until we need to search the free bins.  */
1119 
1120           goto [[found>ggc_alloc_zone_stat#found]];
1121         }
1122     }
1123



1124   /* Failing that, look through the "other" bucket for a chunk
1125      that is large enough.  */
1126   pp = &(zone->free_chunks[0]);
1127   chunk = *pp;
1128   while (chunk && chunk->size < size)
1129     {
1130       pp = &chunk->next_free;
1131       chunk = *pp;
1132     }
1133 
1134   if (chunk)
1135     {
1136       /* Remove the chunk from its previous bin.  */
1137       *pp = chunk->next_free;
1138 
1139       result = (char *) chunk;
1140 
1141       /* Save the rest of the chunk for future allocation, if there's any
1142          left over.  */
1143       csize = chunk->size;
1144       if (csize > size)
1145         {
1146           if (zone->cached_free_size)
1147             free_chunk (zone->cached_free, zone->cached_free_size, zone);
1148 
1149           chunk = (struct alloc_chunk *) ((char *) result + size);
1150           zone->cached_free = (char *) chunk;
1151           zone->cached_free_size = csize - size;
1152 
1153           /* Mark the new free chunk as an object.  */
1154           zone_set_object_alloc_bit (chunk);
1155         }
1156 
1157       goto [[found>ggc_alloc_zone_stat#found]];
1158     }
1159 



1160   /* Handle large allocations.  We could choose any threshold between
1161      GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
1162      GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
1163      be guaranteed to have a unique entry in the lookup table.  Large
1164      allocations will always fall through to here.  */
1165   if (size > GGC_PAGE_SIZE)
1166     {
1167       struct large_page_entry *entry = alloc_large_page (size, zone);
1168 
1169 #ifdef GATHER_STATISTICS
1170       entry->common.survived = 0;
1171 #endif
1172 
1173       entry->next = zone->large_pages;
1174       if (zone->large_pages)
1175         zone->large_pages->prev = entry;
1176       zone->large_pages = entry;
1177 
1178       result = entry->common.page;
1179 
1180       goto [[found>ggc_alloc_zone_stat#found]];
1181     }
1182 



  • どれも見つからなかったら、新しいスモールページを確保する

1183   /* Failing everything above, allocate a new small page.  */
1184 
1185   entry = alloc_small_page (zone);
1186   entry->next = zone->pages;
1187   zone->pages = entry;
1188 
1189   /* Mark the first chunk in the new page.  */
1190   entry->alloc_bits[0] = 1;
1191 
1192   result = entry->common.page;
1193   if (size < SMALL_PAGE_SIZE)
1194     {
1195       if (zone->cached_free_size)
1196         free_chunk (zone->cached_free, zone->cached_free_size, zone);
1197 
1198       zone->cached_free = (char *) result + size;
1199       zone->cached_free_size = SMALL_PAGE_SIZE - size;
1200 
1201       /* Mark the new free chunk as an object.  */
1202       zone_set_object_alloc_bit (zone->cached_free);
1203     }
1204 



  • 割り当てるところが見つかってからの処理

1205  found:
1206 
1207   /* We could save TYPE in the chunk, but we don't use that for
1208      anything yet.  If we wanted to, we could do it by adding it
1209      either before the beginning of the chunk or after its end,
1210      and adjusting the size and pointer appropriately.  */
1211

  • この関数を抜けた後、resultがさす先に書き込むはずなので事前にキャッシュに転送しようとする

1212   /* We'll probably write to this after we return.  */
1213   prefetchw (result);
1214 
1215 #ifdef ENABLE_GC_CHECKING
1216   /* `Poison' the entire allocated object.  */
1217   VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1218   memset (result, 0xaf, size);
1219   VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (result + orig_size,
1220                                             size - orig_size));
1221 #endif
1222 
1223   /* Tell Valgrind that the memory is there, but its content isn't
1224      defined.  The bytes at the end of the object are still marked
1225      unaccessible.  */
1226   VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, orig_size));
1227 
1228   /* Keep track of how many bytes are being allocated.  This
1229      information is used in deciding when to collect.  */
1230   zone->allocated += size;
1231   
1232   timevar_ggc_mem_total += size;
1233


  • オーバヘッドの記録

1234 #ifdef GATHER_STATISTICS
1235   ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);
1236 
1237   {

  • 1237〜1259間でのみ有効な変数の宣言

1238     size_t object_size = size;
1239     size_t overhead = object_size - orig_size;
1240 

  • 合計

1241     zone->stats.total_overhead += overhead;
1242     zone->stats.total_allocated += object_size;
1243 

  • 大きさ別の集計

1244     if (orig_size <= 32)
1245       {
1246         zone->stats.total_overhead_under32 += overhead;
1247         zone->stats.total_allocated_under32 += object_size;
1248       }
1249     if (orig_size <= 64)
1250       {
1251         zone->stats.total_overhead_under64 += overhead;
1252         zone->stats.total_allocated_under64 += object_size;
1253       }
1254     if (orig_size <= 128)
1255       {
1256         zone->stats.total_overhead_under128 += overhead;
1257         zone->stats.total_allocated_under128 += object_size;
1258       }
1259   }
1260 #endif
1261 


  • デバッグ情報をファイルに書き込む

1262   if (GGC_DEBUG_LEVEL >= 3)
1263     fprintf ([[G>struct globals]].debug_file, "Allocating object, size=%lu at %p\n",
1264              (unsigned long) size, result);
1265


1266   return result;
1267 }



リンク元

Advertisement