GCC Wikia
Advertisement

このページを編集する際は,編集に関する方針に従ってください.

概要

引数

実装

ggc-none.c

43 void *
44 ggc_alloc_stat (size_t size MEM_STAT_DECL)
45 {
46   return xmalloc (size);
47 }

ggc-page.c

1055 /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
1056 
1057 void *
1058 ggc_alloc_stat (size_t size MEM_STAT_DECL)
1059 {
1060   size_t order, word, bit, object_offset, object_size;
1061   struct page_entry *entry;
1062   void *result;
1063 
     /* Translate the request size into an allocation order (size class).
        Sizes up to 256 bytes are resolved through the precomputed
        size_lookup table; larger requests search upward from order 9
        for the first order whose object size is big enough.
        NOTE(review): size_lookup is defined elsewhere in this file and
        assumed to cover indices 0..256 -- confirm against its definition.  */
1064   if (size <= 256)
1065     {
1066       order = size_lookup[size];
1067       object_size = OBJECT_SIZE (order);
1068     }
1069   else
1070     {
1071       order = 9;
1072       while (size > (object_size = OBJECT_SIZE (order)))
1073         order++;
1074     }
1075 
1076   /* If there are non-full pages for this size allocation, they are at
1077      the head of the list.  */
1078   entry = G.pages[order];
1079 
1080   /* If there is no page for this object size, or all pages in this
1081      context are full, allocate a new page.  */
1082   if (entry == NULL || entry->num_free_objects == 0)
1083     {
1084       struct page_entry *new_entry;
1085       new_entry = alloc_page (order);
1086 
1087       new_entry->index_by_depth = G.by_depth_in_use;
1088       push_by_depth (new_entry, 0);
1089 
1090       /* We can skip context depths, if we do, make sure we go all the
1091          way to the new depth.  */
1092       while (new_entry->context_depth >= G.depth_in_use)
1093         push_depth (G.by_depth_in_use-1);
1094 
1095       /* If this is the only entry, it's also the tail.  If it is not
1096          the only entry, then we must update the PREV pointer of the
1097          ENTRY (G.pages[order]) to point to our new page entry.  */
1098       if (entry == NULL)
1099         G.page_tails[order] = new_entry;
1100       else
1101         entry->prev = new_entry;
1102 
1103       /* Put new pages at the head of the page list.  By definition the
1104          entry at the head of the list always has a NULL pointer.  */
1105       new_entry->next = entry;
1106       new_entry->prev = NULL;
1107       entry = new_entry;
1108       G.pages[order] = new_entry;
1109 
1110       /* For a new page, we know the word and bit positions (in the
1111          in_use bitmap) of the first available object -- they're zero.  */
1112       new_entry->next_bit_hint = 1;
1113       word = 0;
1114       bit = 0;
1115       object_offset = 0;
1116     }
1117   else
1118     {
1119       /* First try to use the hint left from the previous allocation
1120          to locate a clear bit in the in-use bitmap.  We've made sure
1121          that the one-past-the-end bit is always set, so if the hint
1122          has run over, this test will fail.  */
1123       unsigned hint = entry->next_bit_hint;
1124       word = hint / HOST_BITS_PER_LONG;
1125       bit = hint % HOST_BITS_PER_LONG;
1126 
1127       /* If the hint didn't work, scan the bitmap from the beginning.  */
1128       if ((entry->in_use_p[word] >> bit) & 1)
1129         {
1130           word = bit = 0;
          /* Skip bitmap words that are completely allocated (all bits
             set); the sentinel one-past-the-end bit guarantees this
             loop terminates within the page's bitmap.  */
1131           while (~entry->in_use_p[word] == 0)
1132             ++word;
1133 
1134 #if GCC_VERSION >= 3004
1135           bit = __builtin_ctzl (~entry->in_use_p[word]);
1136 #else
1137           while ((entry->in_use_p[word] >> bit) & 1)
1138             ++bit;
1139 #endif
1140 
1141           hint = word * HOST_BITS_PER_LONG + bit;
1142         }
1143 
1144       /* Next time, try the next bit.  */
1145       entry->next_bit_hint = hint + 1;
1146 
      /* HINT is the index of the allocated object within the page, so
         scaling by the object size gives its byte offset.  */
1147       object_offset = hint * object_size;
1148     }
1149 
1150   /* Set the in-use bit.  */
1151   entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1152 
1153   /* Keep a running total of the number of free objects.  If this page
1154      fills up, we may have to move it to the end of the list if the
1155      next page isn't full.  If the next page is full, all subsequent
1156      pages are full, so there's no need to move it.  */
1157   if (--entry->num_free_objects == 0
1158       && entry->next != NULL
1159       && entry->next->num_free_objects > 0)
1160     {
1161       /* We have a new head for the list.  */
1162       G.pages[order] = entry->next;
1163 
1164       /* We are moving ENTRY to the end of the page table list.
1165          The new page at the head of the list will have NULL in
1166          its PREV field and ENTRY will have NULL in its NEXT field.  */
1167       entry->next->prev = NULL;
1168       entry->next = NULL;
1169 
1170       /* Append ENTRY to the tail of the list.  */
1171       entry->prev = G.page_tails[order];
1172       G.page_tails[order]->next = entry;
1173       G.page_tails[order] = entry;
1174     }
1175 
1176   /* Calculate the object's address.  */
1177   result = entry->page + object_offset;
1178 #ifdef GATHER_STATISTICS
1179   ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1180                        result PASS_MEM_STAT);
1181 #endif
1182 
1183 #ifdef ENABLE_GC_CHECKING
1184   /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1185      exact same semantics in presence of memory bugs, regardless of
1186      ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
1187      handle to avoid handle leak.  */
1188   VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
1189 
1190   /* `Poison' the entire allocated object, including any padding at
1191      the end.  */
1192   memset (result, 0xaf, object_size);
1193 
1194   /* Make the bytes after the end of the object unaccessible.  Discard the
1195      handle to avoid handle leak.  */
1196   VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1197                                             object_size - size));
1198 #endif
1199 
1200   /* Tell Valgrind that the memory is there, but its content isn't
1201      defined.  The bytes at the end of the object are still marked
1202      unaccessible.  */
1203   VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1204 
1205   /* Keep track of how many bytes are being allocated.  This
1206      information is used in deciding when to collect.  */
1207   G.allocated += object_size;
1208 
1209   /* For timevar statistics.  */
1210   timevar_ggc_mem_total += object_size;
1211 
1212 #ifdef GATHER_STATISTICS
1213   {
      /* Overhead is the internal fragmentation: the rounded-up bucket
         size minus the caller's requested size.  */
1214     size_t overhead = object_size - size;
1215 
1216     G.stats.total_overhead += overhead;
1217     G.stats.total_allocated += object_size;
1218     G.stats.total_overhead_per_order[order] += overhead;
1219     G.stats.total_allocated_per_order[order] += object_size;
1220 
1221     if (size <= 32)
1222       {
1223         G.stats.total_overhead_under32 += overhead;
1224         G.stats.total_allocated_under32 += object_size;
1225       }
1226     if (size <= 64)
1227       {
1228         G.stats.total_overhead_under64 += overhead;
1229         G.stats.total_allocated_under64 += object_size;
1230       }
1231     if (size <= 128)
1232       {
1233         G.stats.total_overhead_under128 += overhead;
1234         G.stats.total_allocated_under128 += object_size;
1235       }
1236   }
1237 #endif
1238 
1239   if (GGC_DEBUG_LEVEL >= 3)
1240     fprintf (G.debug_file,
1241              "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1242              (unsigned long) size, (unsigned long) object_size, result,
1243              (void *) entry);
1244 
1245   return result;
1246 }

ggc-zone.c

1292 /* The plain allocation entry point: carve SIZE bytes out of the
1293    default (main) zone.  */
1294 
1295 void *
1296 ggc_alloc_stat (size_t size MEM_STAT_DECL)
1297 {
1298   void *p = ggc_alloc_zone_pass_stat (size, &main_zone);
1299   return p;
1300 }



リンク元

Advertisement