* New ARI warning Fri Apr 20 01:58:17 UTC 2012
@ 2012-04-20 2:07 GDB Administrator
2012-04-20 2:45 ` Yao Qi
0 siblings, 1 reply; 6+ messages in thread
From: GDB Administrator @ 2012-04-20 2:07 UTC (permalink / raw)
To: gdb-patches
140a141,148
> gdb/common/vec.c:39: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/common/vec.c:39:static inline unsigned
> gdb/common/vec.h:419: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/common/vec.h:419:#define DEF_VEC_I(T) static inline void VEC_OP (T,must_be_integral_type) (void) { (void)~(T)0; } VEC_T(T); DEF_VEC_FUNC_P(T) DEF_VEC_ALLOC_FUNC_I(T) struct vec_swallow_trailing_semi
> gdb/common/vec.h:431: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/common/vec.h:431:#define DEF_VEC_P(T) static inline void VEC_OP (T,must_be_pointer_type) (void) { (void)((T)1 == (void *)1); } VEC_T(T); DEF_VEC_FUNC_P(T) DEF_VEC_ALLOC_FUNC_P(T) struct vec_swallow_trailing_semi
> gdb/common/vec.h:520: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/common/vec.h:520:#define DEF_VEC_ALLOC_FUNC_I(T) static inline VEC(T) *VEC_OP (T,alloc) (int alloc_) { return (VEC(T) *) vec_o_reserve (NULL, -alloc_, offsetof (VEC(T),vec), sizeof (T)); } static inline VEC(T) *VEC_OP (T,copy) (VEC(T) *vec_) { size_t len_ = vec_ ? vec_->num : 0; VEC (T) *new_vec_ = NULL; if (len_) { new_vec_ = (VEC (T) *) vec_o_reserve (NULL, -len_, offsetof (VEC(T),vec), sizeof (T)); new_vec_->num = len_; memcpy (new_vec_->vec, vec_->vec, sizeof (T) * len_); } return new_vec_; } static inline void VEC_OP (T,free) (VEC(T) **vec_) { if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline void VEC_OP (T,cleanup) (void *arg_) { VEC(T) **vec_ = arg_; if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline int VEC_OP (T,reserve) (VEC(T) **vec_, int alloc_ VEC_ASSERT_DECL) { int extend = !VEC_OP (T,space) (*vec_, alloc_ < 0 ? -alloc_ : alloc_ VEC_ASSERT_PASS); if (extend) *vec_ = (VEC(T) *) vec_o_reserve (*vec_, alloc_, offsetof (VEC(T),vec), sizeof (T)); return extend; } static inline void VEC_OP (T,safe_grow) (VEC(T) **vec_, int size_ VEC_ASSERT_DECL) { vec_assert (size_ >= 0 && VEC_OP(T,length) (*vec_) <= (unsigned)size_, '); VEC_OP (T,reserve) (vec_, (int)(*vec_ ? (*vec_)->num : 0) - size_ VEC_ASSERT_PASS); (*vec_)->num = size_; } static inline T *VEC_OP (T,safe_push) (VEC(T) **vec_, const T obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_push) (*vec_, obj_ VEC_ASSERT_PASS); } static inline T *VEC_OP (T,safe_insert) (VEC(T) **vec_, unsigned ix_, const T obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_insert) (*vec_, ix_, obj_ VEC_ASSERT_PASS); }
> gdb/common/vec.h:703: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/common/vec.h:703:#define DEF_VEC_FUNC_P(T) static inline unsigned VEC_OP (T,length) (const VEC(T) *vec_) { return vec_ ? vec_->num : 0; } static inline T VEC_OP (T,last) (const VEC(T) *vec_ VEC_ASSERT_DECL) { vec_assert (vec_ && vec_->num, '); return vec_->vec[vec_->num - 1]; } static inline T VEC_OP (T,index) (const VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { vec_assert (vec_ && ix_ < vec_->num, '); return vec_->vec[ix_]; } static inline int VEC_OP (T,iterate) (const VEC(T) *vec_, unsigned ix_, T *ptr) { if (vec_ && ix_ < vec_->num) { *ptr = vec_->vec[ix_]; return 1; } else { *ptr = 0; return 0; } } static inline size_t VEC_OP (T,embedded_size) (int alloc_) { return offsetof (VEC(T),vec) + alloc_ * sizeof(T); } static inline void VEC_OP (T,embedded_init) (VEC(T) *vec_, int alloc_) { vec_->num = 0; vec_->alloc = alloc_; } static inline int VEC_OP (T,space) (VEC(T) *vec_, int alloc_ VEC_ASSERT_DECL) { vec_assert (alloc_ >= 0, '); return vec_ ? vec_->alloc - vec_->num >= (unsigned)alloc_ : !alloc_; } static inline T *VEC_OP (T,quick_push) (VEC(T) *vec_, T obj_ VEC_ASSERT_DECL) { T *slot_; vec_assert (vec_->num < vec_->alloc, '); slot_ = &vec_->vec[vec_->num++]; *slot_ = obj_; return slot_; } static inline T VEC_OP (T,pop) (VEC(T) *vec_ VEC_ASSERT_DECL) { T obj_; vec_assert (vec_->num, '); obj_ = vec_->vec[--vec_->num]; return obj_; } static inline void VEC_OP (T,truncate) (VEC(T) *vec_, unsigned size_ VEC_ASSERT_DECL) { vec_assert (vec_ ? 
vec_->num >= size_ : !size_, '); if (vec_) vec_->num = size_; } static inline T VEC_OP (T,replace) (VEC(T) *vec_, unsigned ix_, T obj_ VEC_ASSERT_DECL) { T old_obj_; vec_assert (ix_ < vec_->num, '); old_obj_ = vec_->vec[ix_]; vec_->vec[ix_] = obj_; return old_obj_; } static inline T *VEC_OP (T,quick_insert) (VEC(T) *vec_, unsigned ix_, T obj_ VEC_ASSERT_DECL) { T *slot_; vec_assert (vec_->num < vec_->alloc && ix_ <= vec_->num, '); slot_ = &vec_->vec[ix_]; memmove (slot_ + 1, slot_, (vec_->num++ - ix_) * sizeof (T)); *slot_ = obj_; return slot_; } static inline T VEC_OP (T,ordered_remove) (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { T *slot_; T obj_; vec_assert (ix_ < vec_->num, '); slot_ = &vec_->vec[ix_]; obj_ = *slot_; memmove (slot_, slot_ + 1, (--vec_->num - ix_) * sizeof (T)); return obj_; } static inline T VEC_OP (T,unordered_remove) (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { T *slot_; T obj_; vec_assert (ix_ < vec_->num, '); slot_ = &vec_->vec[ix_]; obj_ = *slot_; *slot_ = vec_->vec[--vec_->num]; return obj_; } static inline void VEC_OP (T,block_remove) (VEC(T) *vec_, unsigned ix_, unsigned len_ VEC_ASSERT_DECL) { T *slot_; vec_assert (ix_ + len_ <= vec_->num, '); slot_ = &vec_->vec[ix_]; vec_->num -= len_; memmove (slot_, slot_ + len_, (vec_->num - ix_) * sizeof (T)); } static inline T *VEC_OP (T,address) (VEC(T) *vec_) { return vec_ ? vec_->vec : 0; } static inline unsigned VEC_OP (T,lower_bound) (VEC(T) *vec_, const T obj_, int (*lessthan_)(const T, const T) VEC_ASSERT_DECL) { unsigned int len_ = VEC_OP (T, length) (vec_); unsigned int half_, middle_; unsigned int first_ = 0; while (len_ > 0) { T middle_elem_; half_ = len_ >> 1; middle_ = first_; middle_ += half_; middle_elem_ = VEC_OP (T,index) (vec_, middle_ VEC_ASSERT_PASS); if (lessthan_ (middle_elem_, obj_)) { first_ = middle_; ++first_; len_ = len_ - half_ - 1; } else len_ = half_; } return first_; }
> gdb/common/vec.h:782: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/common/vec.h:782:#define DEF_VEC_ALLOC_FUNC_P(T) static inline VEC(T) *VEC_OP (T,alloc) (int alloc_) { return (VEC(T) *) vec_p_reserve (NULL, -alloc_); } static inline void VEC_OP (T,free) (VEC(T) **vec_) { if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline void VEC_OP (T,cleanup) (void *arg_) { VEC(T) **vec_ = arg_; if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline VEC(T) *VEC_OP (T,copy) (VEC(T) *vec_) { size_t len_ = vec_ ? vec_->num : 0; VEC (T) *new_vec_ = NULL; if (len_) { new_vec_ = (VEC (T) *)(vec_p_reserve (NULL, -len_)); new_vec_->num = len_; memcpy (new_vec_->vec, vec_->vec, sizeof (T) * len_); } return new_vec_; } static inline int VEC_OP (T,reserve) (VEC(T) **vec_, int alloc_ VEC_ASSERT_DECL) { int extend = !VEC_OP (T,space) (*vec_, alloc_ < 0 ? -alloc_ : alloc_ VEC_ASSERT_PASS); if (extend) *vec_ = (VEC(T) *) vec_p_reserve (*vec_, alloc_); return extend; } static inline void VEC_OP (T,safe_grow) (VEC(T) **vec_, int size_ VEC_ASSERT_DECL) { vec_assert (size_ >= 0 && VEC_OP(T,length) (*vec_) <= (unsigned)size_, '); VEC_OP (T,reserve) (vec_, (int)(*vec_ ? (*vec_)->num : 0) - size_ VEC_ASSERT_PASS); (*vec_)->num = size_; } static inline T *VEC_OP (T,safe_push) (VEC(T) **vec_, T obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_push) (*vec_, obj_ VEC_ASSERT_PASS); } static inline T *VEC_OP (T,safe_insert) (VEC(T) **vec_, unsigned ix_, T obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_insert) (*vec_, ix_, obj_ VEC_ASSERT_PASS); }
> gdb/common/vec.h:952: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/common/vec.h:952:#define DEF_VEC_FUNC_O(T) static inline unsigned VEC_OP (T,length) (const VEC(T) *vec_) { return vec_ ? vec_->num : 0; } static inline T *VEC_OP (T,last) (VEC(T) *vec_ VEC_ASSERT_DECL) { vec_assert (vec_ && vec_->num, '); return &vec_->vec[vec_->num - 1]; } static inline T *VEC_OP (T,index) (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { vec_assert (vec_ && ix_ < vec_->num, '); return &vec_->vec[ix_]; } static inline int VEC_OP (T,iterate) (VEC(T) *vec_, unsigned ix_, T **ptr) { if (vec_ && ix_ < vec_->num) { *ptr = &vec_->vec[ix_]; return 1; } else { *ptr = 0; return 0; } } static inline size_t VEC_OP (T,embedded_size) (int alloc_) { return offsetof (VEC(T),vec) + alloc_ * sizeof(T); } static inline void VEC_OP (T,embedded_init) (VEC(T) *vec_, int alloc_) { vec_->num = 0; vec_->alloc = alloc_; } static inline int VEC_OP (T,space) (VEC(T) *vec_, int alloc_ VEC_ASSERT_DECL) { vec_assert (alloc_ >= 0, '); return vec_ ? vec_->alloc - vec_->num >= (unsigned)alloc_ : !alloc_; } static inline T *VEC_OP (T,quick_push) (VEC(T) *vec_, const T *obj_ VEC_ASSERT_DECL) { T *slot_; vec_assert (vec_->num < vec_->alloc, '); slot_ = &vec_->vec[vec_->num++]; if (obj_) *slot_ = *obj_; return slot_; } static inline void VEC_OP (T,pop) (VEC(T) *vec_ VEC_ASSERT_DECL) { vec_assert (vec_->num, '); --vec_->num; } static inline void VEC_OP (T,truncate) (VEC(T) *vec_, unsigned size_ VEC_ASSERT_DECL) { vec_assert (vec_ ? 
vec_->num >= size_ : !size_, '); if (vec_) vec_->num = size_; } static inline T *VEC_OP (T,replace) (VEC(T) *vec_, unsigned ix_, const T *obj_ VEC_ASSERT_DECL) { T *slot_; vec_assert (ix_ < vec_->num, '); slot_ = &vec_->vec[ix_]; if (obj_) *slot_ = *obj_; return slot_; } static inline T *VEC_OP (T,quick_insert) (VEC(T) *vec_, unsigned ix_, const T *obj_ VEC_ASSERT_DECL) { T *slot_; vec_assert (vec_->num < vec_->alloc && ix_ <= vec_->num, '); slot_ = &vec_->vec[ix_]; memmove (slot_ + 1, slot_, (vec_->num++ - ix_) * sizeof (T)); if (obj_) *slot_ = *obj_; return slot_; } static inline void VEC_OP (T,ordered_remove) (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { T *slot_; vec_assert (ix_ < vec_->num, '); slot_ = &vec_->vec[ix_]; memmove (slot_, slot_ + 1, (--vec_->num - ix_) * sizeof (T)); } static inline void VEC_OP (T,unordered_remove) (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { vec_assert (ix_ < vec_->num, '); vec_->vec[ix_] = vec_->vec[--vec_->num]; } static inline void VEC_OP (T,block_remove) (VEC(T) *vec_, unsigned ix_, unsigned len_ VEC_ASSERT_DECL) { T *slot_; vec_assert (ix_ + len_ <= vec_->num, '); slot_ = &vec_->vec[ix_]; vec_->num -= len_; memmove (slot_, slot_ + len_, (vec_->num - ix_) * sizeof (T)); } static inline T *VEC_OP (T,address) (VEC(T) *vec_) { return vec_ ? vec_->vec : 0; } static inline unsigned VEC_OP (T,lower_bound) (VEC(T) *vec_, const T *obj_, int (*lessthan_)(const T *, const T *) VEC_ASSERT_DECL) { unsigned int len_ = VEC_OP (T, length) (vec_); unsigned int half_, middle_; unsigned int first_ = 0; while (len_ > 0) { T *middle_elem_; half_ = len_ >> 1; middle_ = first_; middle_ += half_; middle_elem_ = VEC_OP (T,index) (vec_, middle_ VEC_ASSERT_PASS); if (lessthan_ (middle_elem_, obj_)) { first_ = middle_; ++first_; len_ = len_ - half_ - 1; } else len_ = half_; } return first_; }
> gdb/common/vec.h:1034: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/common/vec.h:1034:#define DEF_VEC_ALLOC_FUNC_O(T) static inline VEC(T) *VEC_OP (T,alloc) (int alloc_) { return (VEC(T) *) vec_o_reserve (NULL, -alloc_, offsetof (VEC(T),vec), sizeof (T)); } static inline VEC(T) *VEC_OP (T,copy) (VEC(T) *vec_) { size_t len_ = vec_ ? vec_->num : 0; VEC (T) *new_vec_ = NULL; if (len_) { new_vec_ = (VEC (T) *) vec_o_reserve (NULL, -len_, offsetof (VEC(T),vec), sizeof (T)); new_vec_->num = len_; memcpy (new_vec_->vec, vec_->vec, sizeof (T) * len_); } return new_vec_; } static inline void VEC_OP (T,free) (VEC(T) **vec_) { if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline void VEC_OP (T,cleanup) (void *arg_) { VEC(T) **vec_ = arg_; if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline int VEC_OP (T,reserve) (VEC(T) **vec_, int alloc_ VEC_ASSERT_DECL) { int extend = !VEC_OP (T,space) (*vec_, alloc_ < 0 ? -alloc_ : alloc_ VEC_ASSERT_PASS); if (extend) *vec_ = (VEC(T) *) vec_o_reserve (*vec_, alloc_, offsetof (VEC(T),vec), sizeof (T)); return extend; } static inline void VEC_OP (T,safe_grow) (VEC(T) **vec_, int size_ VEC_ASSERT_DECL) { vec_assert (size_ >= 0 && VEC_OP(T,length) (*vec_) <= (unsigned)size_, '); VEC_OP (T,reserve) (vec_, (int)(*vec_ ? (*vec_)->num : 0) - size_ VEC_ASSERT_PASS); (*vec_)->num = size_; } static inline T *VEC_OP (T,safe_push) (VEC(T) **vec_, const T *obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_push) (*vec_, obj_ VEC_ASSERT_PASS); } static inline T *VEC_OP (T,safe_insert) (VEC(T) **vec_, unsigned ix_, const T *obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_insert) (*vec_, ix_, obj_ VEC_ASSERT_PASS); }
1033,1040d1040
< gdb/vec.c:34: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/vec.c:34:static inline unsigned
< gdb/vec.h:416: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/vec.h:416:#define DEF_VEC_I(T) static inline void VEC_OP (T,must_be_integral_type) (void) { (void)~(T)0; } VEC_T(T); DEF_VEC_FUNC_P(T) DEF_VEC_ALLOC_FUNC_I(T) struct vec_swallow_trailing_semi
< gdb/vec.h:428: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/vec.h:428:#define DEF_VEC_P(T) static inline void VEC_OP (T,must_be_pointer_type) (void) { (void)((T)1 == (void *)1); } VEC_T(T); DEF_VEC_FUNC_P(T) DEF_VEC_ALLOC_FUNC_P(T) struct vec_swallow_trailing_semi
< gdb/vec.h:517: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/vec.h:517:#define DEF_VEC_ALLOC_FUNC_I(T) static inline VEC(T) *VEC_OP (T,alloc) (int alloc_) { return (VEC(T) *) vec_o_reserve (NULL, -alloc_, offsetof (VEC(T),vec), sizeof (T)); } static inline VEC(T) *VEC_OP (T,copy) (VEC(T) *vec_) { size_t len_ = vec_ ? vec_->num : 0; VEC (T) *new_vec_ = NULL; if (len_) { new_vec_ = (VEC (T) *) vec_o_reserve (NULL, -len_, offsetof (VEC(T),vec), sizeof (T)); new_vec_->num = len_; memcpy (new_vec_->vec, vec_->vec, sizeof (T) * len_); } return new_vec_; } static inline void VEC_OP (T,free) (VEC(T) **vec_) { if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline void VEC_OP (T,cleanup) (void *arg_) { VEC(T) **vec_ = arg_; if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline int VEC_OP (T,reserve) (VEC(T) **vec_, int alloc_ VEC_ASSERT_DECL) { int extend = !VEC_OP (T,space) (*vec_, alloc_ < 0 ? -alloc_ : alloc_ VEC_ASSERT_PASS); if (extend) *vec_ = (VEC(T) *) vec_o_reserve (*vec_, alloc_, offsetof (VEC(T),vec), sizeof (T)); return extend; } static inline void VEC_OP (T,safe_grow) (VEC(T) **vec_, int size_ VEC_ASSERT_DECL) { vec_assert (size_ >= 0 && VEC_OP(T,length) (*vec_) <= (unsigned)size_, '); VEC_OP (T,reserve) (vec_, (int)(*vec_ ? (*vec_)->num : 0) - size_ VEC_ASSERT_PASS); (*vec_)->num = size_; } static inline T *VEC_OP (T,safe_push) (VEC(T) **vec_, const T obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_push) (*vec_, obj_ VEC_ASSERT_PASS); } static inline T *VEC_OP (T,safe_insert) (VEC(T) **vec_, unsigned ix_, const T obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_insert) (*vec_, ix_, obj_ VEC_ASSERT_PASS); }
< gdb/vec.h:700: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/vec.h:700:#define DEF_VEC_FUNC_P(T) static inline unsigned VEC_OP (T,length) (const VEC(T) *vec_) { return vec_ ? vec_->num : 0; } static inline T VEC_OP (T,last) (const VEC(T) *vec_ VEC_ASSERT_DECL) { vec_assert (vec_ && vec_->num, '); return vec_->vec[vec_->num - 1]; } static inline T VEC_OP (T,index) (const VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { vec_assert (vec_ && ix_ < vec_->num, '); return vec_->vec[ix_]; } static inline int VEC_OP (T,iterate) (const VEC(T) *vec_, unsigned ix_, T *ptr) { if (vec_ && ix_ < vec_->num) { *ptr = vec_->vec[ix_]; return 1; } else { *ptr = 0; return 0; } } static inline size_t VEC_OP (T,embedded_size) (int alloc_) { return offsetof (VEC(T),vec) + alloc_ * sizeof(T); } static inline void VEC_OP (T,embedded_init) (VEC(T) *vec_, int alloc_) { vec_->num = 0; vec_->alloc = alloc_; } static inline int VEC_OP (T,space) (VEC(T) *vec_, int alloc_ VEC_ASSERT_DECL) { vec_assert (alloc_ >= 0, '); return vec_ ? vec_->alloc - vec_->num >= (unsigned)alloc_ : !alloc_; } static inline T *VEC_OP (T,quick_push) (VEC(T) *vec_, T obj_ VEC_ASSERT_DECL) { T *slot_; vec_assert (vec_->num < vec_->alloc, '); slot_ = &vec_->vec[vec_->num++]; *slot_ = obj_; return slot_; } static inline T VEC_OP (T,pop) (VEC(T) *vec_ VEC_ASSERT_DECL) { T obj_; vec_assert (vec_->num, '); obj_ = vec_->vec[--vec_->num]; return obj_; } static inline void VEC_OP (T,truncate) (VEC(T) *vec_, unsigned size_ VEC_ASSERT_DECL) { vec_assert (vec_ ? 
vec_->num >= size_ : !size_, '); if (vec_) vec_->num = size_; } static inline T VEC_OP (T,replace) (VEC(T) *vec_, unsigned ix_, T obj_ VEC_ASSERT_DECL) { T old_obj_; vec_assert (ix_ < vec_->num, '); old_obj_ = vec_->vec[ix_]; vec_->vec[ix_] = obj_; return old_obj_; } static inline T *VEC_OP (T,quick_insert) (VEC(T) *vec_, unsigned ix_, T obj_ VEC_ASSERT_DECL) { T *slot_; vec_assert (vec_->num < vec_->alloc && ix_ <= vec_->num, '); slot_ = &vec_->vec[ix_]; memmove (slot_ + 1, slot_, (vec_->num++ - ix_) * sizeof (T)); *slot_ = obj_; return slot_; } static inline T VEC_OP (T,ordered_remove) (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { T *slot_; T obj_; vec_assert (ix_ < vec_->num, '); slot_ = &vec_->vec[ix_]; obj_ = *slot_; memmove (slot_, slot_ + 1, (--vec_->num - ix_) * sizeof (T)); return obj_; } static inline T VEC_OP (T,unordered_remove) (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { T *slot_; T obj_; vec_assert (ix_ < vec_->num, '); slot_ = &vec_->vec[ix_]; obj_ = *slot_; *slot_ = vec_->vec[--vec_->num]; return obj_; } static inline void VEC_OP (T,block_remove) (VEC(T) *vec_, unsigned ix_, unsigned len_ VEC_ASSERT_DECL) { T *slot_; vec_assert (ix_ + len_ <= vec_->num, '); slot_ = &vec_->vec[ix_]; vec_->num -= len_; memmove (slot_, slot_ + len_, (vec_->num - ix_) * sizeof (T)); } static inline T *VEC_OP (T,address) (VEC(T) *vec_) { return vec_ ? vec_->vec : 0; } static inline unsigned VEC_OP (T,lower_bound) (VEC(T) *vec_, const T obj_, int (*lessthan_)(const T, const T) VEC_ASSERT_DECL) { unsigned int len_ = VEC_OP (T, length) (vec_); unsigned int half_, middle_; unsigned int first_ = 0; while (len_ > 0) { T middle_elem_; half_ = len_ >> 1; middle_ = first_; middle_ += half_; middle_elem_ = VEC_OP (T,index) (vec_, middle_ VEC_ASSERT_PASS); if (lessthan_ (middle_elem_, obj_)) { first_ = middle_; ++first_; len_ = len_ - half_ - 1; } else len_ = half_; } return first_; }
< gdb/vec.h:779: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/vec.h:779:#define DEF_VEC_ALLOC_FUNC_P(T) static inline VEC(T) *VEC_OP (T,alloc) (int alloc_) { return (VEC(T) *) vec_p_reserve (NULL, -alloc_); } static inline void VEC_OP (T,free) (VEC(T) **vec_) { if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline void VEC_OP (T,cleanup) (void *arg_) { VEC(T) **vec_ = arg_; if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline VEC(T) *VEC_OP (T,copy) (VEC(T) *vec_) { size_t len_ = vec_ ? vec_->num : 0; VEC (T) *new_vec_ = NULL; if (len_) { new_vec_ = (VEC (T) *)(vec_p_reserve (NULL, -len_)); new_vec_->num = len_; memcpy (new_vec_->vec, vec_->vec, sizeof (T) * len_); } return new_vec_; } static inline int VEC_OP (T,reserve) (VEC(T) **vec_, int alloc_ VEC_ASSERT_DECL) { int extend = !VEC_OP (T,space) (*vec_, alloc_ < 0 ? -alloc_ : alloc_ VEC_ASSERT_PASS); if (extend) *vec_ = (VEC(T) *) vec_p_reserve (*vec_, alloc_); return extend; } static inline void VEC_OP (T,safe_grow) (VEC(T) **vec_, int size_ VEC_ASSERT_DECL) { vec_assert (size_ >= 0 && VEC_OP(T,length) (*vec_) <= (unsigned)size_, '); VEC_OP (T,reserve) (vec_, (int)(*vec_ ? (*vec_)->num : 0) - size_ VEC_ASSERT_PASS); (*vec_)->num = size_; } static inline T *VEC_OP (T,safe_push) (VEC(T) **vec_, T obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_push) (*vec_, obj_ VEC_ASSERT_PASS); } static inline T *VEC_OP (T,safe_insert) (VEC(T) **vec_, unsigned ix_, T obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_insert) (*vec_, ix_, obj_ VEC_ASSERT_PASS); }
< gdb/vec.h:949: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/vec.h:949:#define DEF_VEC_FUNC_O(T) static inline unsigned VEC_OP (T,length) (const VEC(T) *vec_) { return vec_ ? vec_->num : 0; } static inline T *VEC_OP (T,last) (VEC(T) *vec_ VEC_ASSERT_DECL) { vec_assert (vec_ && vec_->num, '); return &vec_->vec[vec_->num - 1]; } static inline T *VEC_OP (T,index) (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { vec_assert (vec_ && ix_ < vec_->num, '); return &vec_->vec[ix_]; } static inline int VEC_OP (T,iterate) (VEC(T) *vec_, unsigned ix_, T **ptr) { if (vec_ && ix_ < vec_->num) { *ptr = &vec_->vec[ix_]; return 1; } else { *ptr = 0; return 0; } } static inline size_t VEC_OP (T,embedded_size) (int alloc_) { return offsetof (VEC(T),vec) + alloc_ * sizeof(T); } static inline void VEC_OP (T,embedded_init) (VEC(T) *vec_, int alloc_) { vec_->num = 0; vec_->alloc = alloc_; } static inline int VEC_OP (T,space) (VEC(T) *vec_, int alloc_ VEC_ASSERT_DECL) { vec_assert (alloc_ >= 0, '); return vec_ ? vec_->alloc - vec_->num >= (unsigned)alloc_ : !alloc_; } static inline T *VEC_OP (T,quick_push) (VEC(T) *vec_, const T *obj_ VEC_ASSERT_DECL) { T *slot_; vec_assert (vec_->num < vec_->alloc, '); slot_ = &vec_->vec[vec_->num++]; if (obj_) *slot_ = *obj_; return slot_; } static inline void VEC_OP (T,pop) (VEC(T) *vec_ VEC_ASSERT_DECL) { vec_assert (vec_->num, '); --vec_->num; } static inline void VEC_OP (T,truncate) (VEC(T) *vec_, unsigned size_ VEC_ASSERT_DECL) { vec_assert (vec_ ? 
vec_->num >= size_ : !size_, '); if (vec_) vec_->num = size_; } static inline T *VEC_OP (T,replace) (VEC(T) *vec_, unsigned ix_, const T *obj_ VEC_ASSERT_DECL) { T *slot_; vec_assert (ix_ < vec_->num, '); slot_ = &vec_->vec[ix_]; if (obj_) *slot_ = *obj_; return slot_; } static inline T *VEC_OP (T,quick_insert) (VEC(T) *vec_, unsigned ix_, const T *obj_ VEC_ASSERT_DECL) { T *slot_; vec_assert (vec_->num < vec_->alloc && ix_ <= vec_->num, '); slot_ = &vec_->vec[ix_]; memmove (slot_ + 1, slot_, (vec_->num++ - ix_) * sizeof (T)); if (obj_) *slot_ = *obj_; return slot_; } static inline void VEC_OP (T,ordered_remove) (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { T *slot_; vec_assert (ix_ < vec_->num, '); slot_ = &vec_->vec[ix_]; memmove (slot_, slot_ + 1, (--vec_->num - ix_) * sizeof (T)); } static inline void VEC_OP (T,unordered_remove) (VEC(T) *vec_, unsigned ix_ VEC_ASSERT_DECL) { vec_assert (ix_ < vec_->num, '); vec_->vec[ix_] = vec_->vec[--vec_->num]; } static inline void VEC_OP (T,block_remove) (VEC(T) *vec_, unsigned ix_, unsigned len_ VEC_ASSERT_DECL) { T *slot_; vec_assert (ix_ + len_ <= vec_->num, '); slot_ = &vec_->vec[ix_]; vec_->num -= len_; memmove (slot_, slot_ + len_, (vec_->num - ix_) * sizeof (T)); } static inline T *VEC_OP (T,address) (VEC(T) *vec_) { return vec_ ? vec_->vec : 0; } static inline unsigned VEC_OP (T,lower_bound) (VEC(T) *vec_, const T *obj_, int (*lessthan_)(const T *, const T *) VEC_ASSERT_DECL) { unsigned int len_ = VEC_OP (T, length) (vec_); unsigned int half_, middle_; unsigned int first_ = 0; while (len_ > 0) { T *middle_elem_; half_ = len_ >> 1; middle_ = first_; middle_ += half_; middle_elem_ = VEC_OP (T,index) (vec_, middle_ VEC_ASSERT_PASS); if (lessthan_ (middle_elem_, obj_)) { first_ = middle_; ++first_; len_ = len_ - half_ - 1; } else len_ = half_; } return first_; }
< gdb/vec.h:1031: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
gdb/vec.h:1031:#define DEF_VEC_ALLOC_FUNC_O(T) static inline VEC(T) *VEC_OP (T,alloc) (int alloc_) { return (VEC(T) *) vec_o_reserve (NULL, -alloc_, offsetof (VEC(T),vec), sizeof (T)); } static inline VEC(T) *VEC_OP (T,copy) (VEC(T) *vec_) { size_t len_ = vec_ ? vec_->num : 0; VEC (T) *new_vec_ = NULL; if (len_) { new_vec_ = (VEC (T) *) vec_o_reserve (NULL, -len_, offsetof (VEC(T),vec), sizeof (T)); new_vec_->num = len_; memcpy (new_vec_->vec, vec_->vec, sizeof (T) * len_); } return new_vec_; } static inline void VEC_OP (T,free) (VEC(T) **vec_) { if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline void VEC_OP (T,cleanup) (void *arg_) { VEC(T) **vec_ = arg_; if (*vec_) vec_free_ (*vec_); *vec_ = NULL; } static inline int VEC_OP (T,reserve) (VEC(T) **vec_, int alloc_ VEC_ASSERT_DECL) { int extend = !VEC_OP (T,space) (*vec_, alloc_ < 0 ? -alloc_ : alloc_ VEC_ASSERT_PASS); if (extend) *vec_ = (VEC(T) *) vec_o_reserve (*vec_, alloc_, offsetof (VEC(T),vec), sizeof (T)); return extend; } static inline void VEC_OP (T,safe_grow) (VEC(T) **vec_, int size_ VEC_ASSERT_DECL) { vec_assert (size_ >= 0 && VEC_OP(T,length) (*vec_) <= (unsigned)size_, '); VEC_OP (T,reserve) (vec_, (int)(*vec_ ? (*vec_)->num : 0) - size_ VEC_ASSERT_PASS); (*vec_)->num = size_; } static inline T *VEC_OP (T,safe_push) (VEC(T) **vec_, const T *obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_push) (*vec_, obj_ VEC_ASSERT_PASS); } static inline T *VEC_OP (T,safe_insert) (VEC(T) **vec_, unsigned ix_, const T *obj_ VEC_ASSERT_DECL) { VEC_OP (T,reserve) (vec_, 1 VEC_ASSERT_PASS); return VEC_OP (T,quick_insert) (*vec_, ix_, obj_ VEC_ASSERT_PASS); }
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: New ARI warning Fri Apr 20 01:58:17 UTC 2012
2012-04-20 2:07 New ARI warning Fri Apr 20 01:58:17 UTC 2012 GDB Administrator
@ 2012-04-20 2:45 ` Yao Qi
2012-04-20 9:48 ` Joel Brobecker
0 siblings, 1 reply; 6+ messages in thread
From: Yao Qi @ 2012-04-20 2:45 UTC (permalink / raw)
To: gdb-patches
On 04/20/2012 09:58 AM, GDB Administrator wrote:
> 140a141,148
>> > gdb/common/vec.c:39: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
> gdb/common/vec.c:39:static inline unsigned
>> > gdb/common/vec.h:419: code: inline: Do not use the inline attribute; since the compiler generally ignores this, better algorithm selection is needed to improved performance
IIUC, these alarms are moved from vec.[ch] to common/vec.[ch], so they
can be ignored. Let me know if I am wrong.
--
Yao (齐尧)
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: New ARI warning Fri Apr 20 01:58:17 UTC 2012
2012-04-20 2:45 ` Yao Qi
@ 2012-04-20 9:48 ` Joel Brobecker
2012-04-20 10:04 ` Eli Zaretskii
0 siblings, 1 reply; 6+ messages in thread
From: Joel Brobecker @ 2012-04-20 9:48 UTC (permalink / raw)
To: Yao Qi; +Cc: gdb-patches
> IIUC, these alarms are moved from vec.[ch] to common/vec.[ch], so they
> can be ignored. Let me know if I am wrong.
Generally speaking, if there are only a few, I personally think
that we should at least try to address them, especially if it is
easy. But that shouldn't necessarily have to be addressed by
the person who moved the code.
In this particular case, I kind of agree with the warning, but
at the same time, I don't really see why we should necessarily
ban the use of the "inline" keyword. I'm actually considering
the idea of getting rid of this ARI rule.
Any opinion on this topic? My tendency is to discourage the use
of that keyword unless it has been shown to make a difference.
But it's only based on what more experienced engineers have
told me, and I never really had the need to verify it.
--
Joel
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: New ARI warning Fri Apr 20 01:58:17 UTC 2012
2012-04-20 9:48 ` Joel Brobecker
@ 2012-04-20 10:04 ` Eli Zaretskii
2012-04-20 11:14 ` Pierre Muller
[not found] ` <43835.7730432928$1334919059@news.gmane.org>
0 siblings, 2 replies; 6+ messages in thread
From: Eli Zaretskii @ 2012-04-20 10:04 UTC (permalink / raw)
To: Joel Brobecker; +Cc: yao, gdb-patches
> Date: Thu, 19 Apr 2012 21:38:07 -0700
> From: Joel Brobecker <brobecker@adacore.com>
> Cc: gdb-patches@sourceware.org
>
> In this particular case, I kind of agree with the warning, but
> at the same time, I don't really see why we should necessarily
> ban the use of the "inline" keyword. I'm actually considering
> the idea of getting rid of this ARI rule.
>
> Any opinion on this topic?
Absolutely, the warning should be tossed. There's nothing wrong with
using it.
^ permalink raw reply [flat|nested] 6+ messages in thread
* RE: New ARI warning Fri Apr 20 01:58:17 UTC 2012
2012-04-20 10:04 ` Eli Zaretskii
@ 2012-04-20 11:14 ` Pierre Muller
[not found] ` <43835.7730432928$1334919059@news.gmane.org>
1 sibling, 0 replies; 6+ messages in thread
From: Pierre Muller @ 2012-04-20 11:14 UTC (permalink / raw)
To: 'Eli Zaretskii', 'Joel Brobecker'; +Cc: yao, gdb-patches
> -----Message d'origine-----
> De : gdb-patches-owner@sourceware.org [mailto:gdb-patches-
> owner@sourceware.org] De la part de Eli Zaretskii
> Envoyé : vendredi 20 avril 2012 11:48
> À : Joel Brobecker
> Cc : yao@codesourcery.com; gdb-patches@sourceware.org
> Objet : Re: New ARI warning Fri Apr 20 01:58:17 UTC 2012
>
> > Date: Thu, 19 Apr 2012 21:38:07 -0700
> > From: Joel Brobecker <brobecker@adacore.com>
> > Cc: gdb-patches@sourceware.org
> >
> > In this particular case, I kind of agree with the warning, but
> > at the same time, I don't really see why we should necessarily
> > ban the use of the "inline" keyword. I'm actually considering
> > the idea of getting rid of this ARI rule.
> >
> > Any opinion on this topic?
>
> Absolutely, the warning should be tossed. There's nothing wrong with
> using it.
I have no opinion on the removal of this rule,
I would just like to be sure that,
if we allow to use inline function,
the configure step will correctly define inline as nothing
on systems using compilers that do not support inline functions.
Does anyone know if this is done inside GDB configuration step?
Pierre Muller
As un-official GDB ARI maintainer ...
^ permalink raw reply [flat|nested] 6+ messages in thread
[parent not found: <43835.7730432928$1334919059@news.gmane.org>]
* Re: New ARI warning Fri Apr 20 01:58:17 UTC 2012
[not found] ` <43835.7730432928$1334919059@news.gmane.org>
@ 2012-04-20 12:41 ` Andreas Schwab
0 siblings, 0 replies; 6+ messages in thread
From: Andreas Schwab @ 2012-04-20 12:41 UTC (permalink / raw)
To: Pierre Muller
Cc: 'Eli Zaretskii', 'Joel Brobecker', yao, gdb-patches
"Pierre Muller" <pierre.muller@ics-cnrs.unistra.fr> writes:
> I have no opinion on the removal of this rule,
> I would just like to be sure that,
> if we allow to use inline function,
> the configure step will correctly define inline as nothing
> on systems using compilers that do not support inline functions.
>
> Does anyone know if this is done inside GDB configuration step?
AC_C_INLINE in gdb/configure.ac.
Andreas.
--
Andreas Schwab, schwab@linux-m68k.org
GPG Key fingerprint = 58CA 54C7 6D53 942B 1756 01D3 44D5 214B 8276 4ED5
"And now for something completely different."
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2012-04-20 12:12 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-04-20 2:07 New ARI warning Fri Apr 20 01:58:17 UTC 2012 GDB Administrator
2012-04-20 2:45 ` Yao Qi
2012-04-20 9:48 ` Joel Brobecker
2012-04-20 10:04 ` Eli Zaretskii
2012-04-20 11:14 ` Pierre Muller
[not found] ` <43835.7730432928$1334919059@news.gmane.org>
2012-04-20 12:41 ` Andreas Schwab
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox.